path: root/arch/s390
author    Arnd Bergmann <arnd@arndb.de>  2012-10-04 16:57:00 -0400
committer Arnd Bergmann <arnd@arndb.de>  2012-10-04 16:57:51 -0400
commit    c37d6154c0b9163c27e53cc1d0be3867b4abd760 (patch)
tree      7a24522c56d1cb284dff1d3c225bbdaba0901bb5 /arch/s390
parent    e7a570ff7dff9af6e54ff5e580a61ec7652137a0 (diff)
parent    8a1ab3155c2ac7fbe5f2038d6e26efeb607a1498 (diff)
Merge branch 'disintegrate-asm-generic' of git://git.infradead.org/users/dhowells/linux-headers into asm-generic
Patches from David Howells <dhowells@redhat.com>:

This is to complete part of the UAPI disintegration for which the
preparatory patches were pulled recently. Note that there are some fixup
patches which are at the base of the branch aimed at you, plus all arches
get the asm-generic branch merged in too.

* 'disintegrate-asm-generic' of git://git.infradead.org/users/dhowells/linux-headers:
  UAPI: (Scripted) Disintegrate include/asm-generic
  UAPI: Fix conditional header installation handling (notably kvm_para.h on m68k)
  c6x: remove c6x signal.h
  UAPI: Split compound conditionals containing __KERNEL__ in Arm64
  UAPI: Fix the guards on various asm/unistd.h files

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
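For context on the UAPI disintegration referenced above: each exported header is split into a userspace-visible half under include/uapi/ (or arch/*/include/uapi/) and a kernel-only half that includes it, so the #ifdef __KERNEL__ guards can be dropped from exported files. The sketch below only illustrates that pattern; it is not a file from this commit, and the header, guard, and struct names are made up. Compare, for instance, the chsc.h hunk further down, where a __KERNEL__-guarded block leaves the exported header and the struct reappears in the new kernel-only css_chars.h.

/* arch/s390/include/uapi/asm/example.h -- hypothetical, installed for userspace */
#ifndef _UAPI_ASM_S390_EXAMPLE_H
#define _UAPI_ASM_S390_EXAMPLE_H

#include <linux/types.h>

struct example_info {		/* user-visible ABI */
	__u32 version;
	__u32 flags;
};

#endif /* _UAPI_ASM_S390_EXAMPLE_H */

/* arch/s390/include/asm/example.h -- hypothetical kernel-only wrapper */
#ifndef _ASM_S390_EXAMPLE_H
#define _ASM_S390_EXAMPLE_H

#include <uapi/asm/example.h>

/* declarations that used to sit behind #ifdef __KERNEL__ */
int example_query(struct example_info *info);

#endif /* _ASM_S390_EXAMPLE_H */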
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kbuild | 1
-rw-r--r--  arch/s390/Kconfig | 421
-rw-r--r--  arch/s390/boot/compressed/Makefile | 1
-rw-r--r--  arch/s390/boot/compressed/misc.c | 45
-rw-r--r--  arch/s390/defconfig | 14
-rw-r--r--  arch/s390/hypfs/inode.c | 22
-rw-r--r--  arch/s390/include/asm/appldata.h | 2
-rw-r--r--  arch/s390/include/asm/chsc.h | 28
-rw-r--r--  arch/s390/include/asm/cio.h | 28
-rw-r--r--  arch/s390/include/asm/cmpxchg.h | 61
-rw-r--r--  arch/s390/include/asm/cpu_mf.h | 4
-rw-r--r--  arch/s390/include/asm/cputime.h | 3
-rw-r--r--  arch/s390/include/asm/css_chars.h | 39
-rw-r--r--  arch/s390/include/asm/eadm.h | 124
-rw-r--r--  arch/s390/include/asm/elf.h | 3
-rw-r--r--  arch/s390/include/asm/etr.h | 8
-rw-r--r--  arch/s390/include/asm/irq.h | 2
-rw-r--r--  arch/s390/include/asm/isc.h | 1
-rw-r--r--  arch/s390/include/asm/lowcore.h | 6
-rw-r--r--  arch/s390/include/asm/mmu_context.h | 2
-rw-r--r--  arch/s390/include/asm/percpu.h | 50
-rw-r--r--  arch/s390/include/asm/processor.h | 59
-rw-r--r--  arch/s390/include/asm/ptrace.h | 12
-rw-r--r--  arch/s390/include/asm/runtime_instr.h | 98
-rw-r--r--  arch/s390/include/asm/scsw.h | 38
-rw-r--r--  arch/s390/include/asm/setup.h | 5
-rw-r--r--  arch/s390/include/asm/smp.h | 4
-rw-r--r--  arch/s390/include/asm/string.h | 8
-rw-r--r--  arch/s390/include/asm/switch_to.h | 6
-rw-r--r--  arch/s390/include/asm/sysinfo.h | 39
-rw-r--r--  arch/s390/include/asm/topology.h | 20
-rw-r--r--  arch/s390/include/asm/uaccess.h | 15
-rw-r--r--  arch/s390/include/asm/unistd.h | 4
-rw-r--r--  arch/s390/include/uapi/asm/Kbuild | 3
-rw-r--r--  arch/s390/kernel/Makefile | 12
-rw-r--r--  arch/s390/kernel/asm-offsets.c | 2
-rw-r--r--  arch/s390/kernel/cache.c | 385
-rw-r--r--  arch/s390/kernel/compat_linux.c | 36
-rw-r--r--  arch/s390/kernel/compat_wrapper.S | 13
-rw-r--r--  arch/s390/kernel/crash.c | 14
-rw-r--r--  arch/s390/kernel/crash_dump.c | 3
-rw-r--r--  arch/s390/kernel/dis.c | 58
-rw-r--r--  arch/s390/kernel/early.c | 38
-rw-r--r--  arch/s390/kernel/entry64.S | 17
-rw-r--r--  arch/s390/kernel/irq.c | 56
-rw-r--r--  arch/s390/kernel/kprobes.c | 2
-rw-r--r--  arch/s390/kernel/lgr.c | 29
-rw-r--r--  arch/s390/kernel/machine_kexec.c | 9
-rw-r--r--  arch/s390/kernel/process.c | 8
-rw-r--r--  arch/s390/kernel/processor.c | 7
-rw-r--r--  arch/s390/kernel/ptrace.c | 70
-rw-r--r--  arch/s390/kernel/runtime_instr.c | 150
-rw-r--r--  arch/s390/kernel/s390_ksyms.c | 2
-rw-r--r--  arch/s390/kernel/setup.c | 54
-rw-r--r--  arch/s390/kernel/smp.c | 46
-rw-r--r--  arch/s390/kernel/syscalls.S | 2
-rw-r--r--  arch/s390/kernel/sysinfo.c | 351
-rw-r--r--  arch/s390/kernel/time.c | 4
-rw-r--r--  arch/s390/kernel/topology.c | 27
-rw-r--r--  arch/s390/kernel/traps.c | 41
-rw-r--r--  arch/s390/kernel/vdso.c | 8
-rw-r--r--  arch/s390/kernel/vtime.c | 11
-rw-r--r--  arch/s390/kvm/Kconfig | 2
-rw-r--r--  arch/s390/kvm/priv.c | 4
-rw-r--r--  arch/s390/lib/Makefile | 3
-rw-r--r--  arch/s390/lib/mem32.S | 92
-rw-r--r--  arch/s390/lib/mem64.S | 88
-rw-r--r--  arch/s390/lib/string.c | 56
-rw-r--r--  arch/s390/mm/Makefile | 2
-rw-r--r--  arch/s390/mm/extable.c | 81
-rw-r--r--  arch/s390/mm/fault.c | 7
-rw-r--r--  arch/s390/mm/gup.c | 37
-rw-r--r--  arch/s390/mm/init.c | 4
-rw-r--r--  arch/s390/mm/pgtable.c | 6
-rw-r--r--  arch/s390/mm/vmem.c | 2
-rw-r--r--  arch/s390/net/Makefile | 4
-rw-r--r--  arch/s390/net/bpf_jit.S | 130
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 776
78 files changed, 3122 insertions, 803 deletions
diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild
index 9858476fa0fe..cc45d25487b0 100644
--- a/arch/s390/Kbuild
+++ b/arch/s390/Kbuild
@@ -5,3 +5,4 @@ obj-$(CONFIG_CRYPTO_HW) += crypto/
 obj-$(CONFIG_S390_HYPFS_FS) += hypfs/
 obj-$(CONFIG_APPLDATA_BASE) += appldata/
 obj-$(CONFIG_MATHEMU) += math-emu/
+obj-y += net/
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 107610e01a29..f9acddd9ace3 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -49,10 +49,13 @@ config GENERIC_LOCKBREAK
49config PGSTE 49config PGSTE
50 def_bool y if KVM 50 def_bool y if KVM
51 51
52config VIRT_CPU_ACCOUNTING 52config ARCH_SUPPORTS_DEBUG_PAGEALLOC
53 def_bool y 53 def_bool y
54 54
55config ARCH_SUPPORTS_DEBUG_PAGEALLOC 55config KEXEC
56 def_bool y
57
58config AUDIT_ARCH
56 def_bool y 59 def_bool y
57 60
58config S390 61config S390
@@ -84,11 +87,15 @@ config S390
84 select HAVE_KERNEL_XZ 87 select HAVE_KERNEL_XZ
85 select HAVE_ARCH_MUTEX_CPU_RELAX 88 select HAVE_ARCH_MUTEX_CPU_RELAX
86 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 89 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
90 select HAVE_BPF_JIT if 64BIT && PACK_STACK
87 select ARCH_SAVE_PAGE_KEYS if HIBERNATION 91 select ARCH_SAVE_PAGE_KEYS if HIBERNATION
88 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 92 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
89 select HAVE_MEMBLOCK 93 select HAVE_MEMBLOCK
90 select HAVE_MEMBLOCK_NODE_MAP 94 select HAVE_MEMBLOCK_NODE_MAP
91 select HAVE_CMPXCHG_LOCAL 95 select HAVE_CMPXCHG_LOCAL
96 select HAVE_CMPXCHG_DOUBLE
97 select HAVE_VIRT_CPU_ACCOUNTING
98 select VIRT_CPU_ACCOUNTING
92 select ARCH_DISCARD_MEMBLOCK 99 select ARCH_DISCARD_MEMBLOCK
93 select BUILDTIME_EXTABLE_SORT 100 select BUILDTIME_EXTABLE_SORT
94 select ARCH_INLINE_SPIN_TRYLOCK 101 select ARCH_INLINE_SPIN_TRYLOCK
@@ -133,9 +140,79 @@ source "init/Kconfig"
133 140
134source "kernel/Kconfig.freezer" 141source "kernel/Kconfig.freezer"
135 142
136menu "Base setup" 143menu "Processor type and features"
144
145config HAVE_MARCH_Z900_FEATURES
146 def_bool n
147
148config HAVE_MARCH_Z990_FEATURES
149 def_bool n
150 select HAVE_MARCH_Z900_FEATURES
151
152config HAVE_MARCH_Z9_109_FEATURES
153 def_bool n
154 select HAVE_MARCH_Z990_FEATURES
155
156config HAVE_MARCH_Z10_FEATURES
157 def_bool n
158 select HAVE_MARCH_Z9_109_FEATURES
159
160config HAVE_MARCH_Z196_FEATURES
161 def_bool n
162 select HAVE_MARCH_Z10_FEATURES
163
164choice
165 prompt "Processor type"
166 default MARCH_G5
167
168config MARCH_G5
169 bool "System/390 model G5 and G6"
170 depends on !64BIT
171 help
172 Select this to build a 31 bit kernel that works
173 on all ESA/390 and z/Architecture machines.
137 174
138comment "Processor type and features" 175config MARCH_Z900
176 bool "IBM zSeries model z800 and z900"
177 select HAVE_MARCH_Z900_FEATURES if 64BIT
178 help
179 Select this to enable optimizations for model z800/z900 (2064 and
180 2066 series). This will enable some optimizations that are not
181 available on older ESA/390 (31 Bit) only CPUs.
182
183config MARCH_Z990
184 bool "IBM zSeries model z890 and z990"
185 select HAVE_MARCH_Z990_FEATURES if 64BIT
186 help
187 Select this to enable optimizations for model z890/z990 (2084 and
188 2086 series). The kernel will be slightly faster but will not work
189 on older machines.
190
191config MARCH_Z9_109
192 bool "IBM System z9"
193 select HAVE_MARCH_Z9_109_FEATURES if 64BIT
194 help
195 Select this to enable optimizations for IBM System z9 (2094 and
196 2096 series). The kernel will be slightly faster but will not work
197 on older machines.
198
199config MARCH_Z10
200 bool "IBM System z10"
201 select HAVE_MARCH_Z10_FEATURES if 64BIT
202 help
203 Select this to enable optimizations for IBM System z10 (2097 and
204 2098 series). The kernel will be slightly faster but will not work
205 on older machines.
206
207config MARCH_Z196
208 bool "IBM zEnterprise 114 and 196"
209 select HAVE_MARCH_Z196_FEATURES if 64BIT
210 help
211 Select this to enable optimizations for IBM zEnterprise 114 and 196
212 (2818 and 2817 series). The kernel will be slightly faster but will
213 not work on older machines.
214
215endchoice
139 216
140config 64BIT 217config 64BIT
141 def_bool y 218 def_bool y
@@ -147,6 +224,24 @@ config 64BIT
147config 32BIT 224config 32BIT
148 def_bool y if !64BIT 225 def_bool y if !64BIT
149 226
227config COMPAT
228 def_bool y
229 prompt "Kernel support for 31 bit emulation"
230 depends on 64BIT
231 select COMPAT_BINFMT_ELF if BINFMT_ELF
232 select ARCH_WANT_OLD_COMPAT_IPC
233 help
234 Select this option if you want to enable your system kernel to
235 handle system-calls from ELF binaries for 31 bit ESA. This option
236 (and some other stuff like libraries and such) is needed for
237 executing 31 bit applications. It is safe to say "Y".
238
239config SYSVIPC_COMPAT
240 def_bool y if COMPAT && SYSVIPC
241
242config KEYS_COMPAT
243 def_bool y if COMPAT && KEYS
244
150config SMP 245config SMP
151 def_bool y 246 def_bool y
152 prompt "Symmetric multi-processing support" 247 prompt "Symmetric multi-processing support"
@@ -202,6 +297,8 @@ config SCHED_BOOK
202 Book scheduler support improves the CPU scheduler's decision making 297 Book scheduler support improves the CPU scheduler's decision making
203 when dealing with machines that have several books. 298 when dealing with machines that have several books.
204 299
300source kernel/Kconfig.preempt
301
205config MATHEMU 302config MATHEMU
206 def_bool y 303 def_bool y
207 prompt "IEEE FPU emulation" 304 prompt "IEEE FPU emulation"
@@ -211,100 +308,35 @@ config MATHEMU
211 on older ESA/390 machines. Say Y unless you know your machine doesn't 308 on older ESA/390 machines. Say Y unless you know your machine doesn't
212 need this. 309 need this.
213 310
214config COMPAT 311source kernel/Kconfig.hz
215 def_bool y
216 prompt "Kernel support for 31 bit emulation"
217 depends on 64BIT
218 select COMPAT_BINFMT_ELF if BINFMT_ELF
219 select ARCH_WANT_OLD_COMPAT_IPC
220 help
221 Select this option if you want to enable your system kernel to
222 handle system-calls from ELF binaries for 31 bit ESA. This option
223 (and some other stuff like libraries and such) is needed for
224 executing 31 bit applications. It is safe to say "Y".
225 312
226config SYSVIPC_COMPAT 313endmenu
227 def_bool y if COMPAT && SYSVIPC
228 314
229config KEYS_COMPAT 315menu "Memory setup"
230 def_bool y if COMPAT && KEYS
231 316
232config AUDIT_ARCH 317config ARCH_SPARSEMEM_ENABLE
233 def_bool y 318 def_bool y
319 select SPARSEMEM_VMEMMAP_ENABLE
320 select SPARSEMEM_VMEMMAP
321 select SPARSEMEM_STATIC if !64BIT
234 322
235config HAVE_MARCH_Z900_FEATURES 323config ARCH_SPARSEMEM_DEFAULT
236 def_bool n 324 def_bool y
237
238config HAVE_MARCH_Z990_FEATURES
239 def_bool n
240 select HAVE_MARCH_Z900_FEATURES
241
242config HAVE_MARCH_Z9_109_FEATURES
243 def_bool n
244 select HAVE_MARCH_Z990_FEATURES
245
246config HAVE_MARCH_Z10_FEATURES
247 def_bool n
248 select HAVE_MARCH_Z9_109_FEATURES
249
250config HAVE_MARCH_Z196_FEATURES
251 def_bool n
252 select HAVE_MARCH_Z10_FEATURES
253
254comment "Code generation options"
255
256choice
257 prompt "Processor type"
258 default MARCH_G5
259
260config MARCH_G5
261 bool "System/390 model G5 and G6"
262 depends on !64BIT
263 help
264 Select this to build a 31 bit kernel that works
265 on all ESA/390 and z/Architecture machines.
266
267config MARCH_Z900
268 bool "IBM zSeries model z800 and z900"
269 select HAVE_MARCH_Z900_FEATURES if 64BIT
270 help
271 Select this to enable optimizations for model z800/z900 (2064 and
272 2066 series). This will enable some optimizations that are not
273 available on older ESA/390 (31 Bit) only CPUs.
274 325
275config MARCH_Z990 326config ARCH_SELECT_MEMORY_MODEL
276 bool "IBM zSeries model z890 and z990" 327 def_bool y
277 select HAVE_MARCH_Z990_FEATURES if 64BIT
278 help
279 Select this to enable optimizations for model z890/z990 (2084 and
280 2086 series). The kernel will be slightly faster but will not work
281 on older machines.
282 328
283config MARCH_Z9_109 329config ARCH_ENABLE_MEMORY_HOTPLUG
284 bool "IBM System z9" 330 def_bool y if SPARSEMEM
285 select HAVE_MARCH_Z9_109_FEATURES if 64BIT
286 help
287 Select this to enable optimizations for IBM System z9 (2094 and
288 2096 series). The kernel will be slightly faster but will not work
289 on older machines.
290 331
291config MARCH_Z10 332config ARCH_ENABLE_MEMORY_HOTREMOVE
292 bool "IBM System z10" 333 def_bool y
293 select HAVE_MARCH_Z10_FEATURES if 64BIT
294 help
295 Select this to enable optimizations for IBM System z10 (2097 and
296 2098 series). The kernel will be slightly faster but will not work
297 on older machines.
298 334
299config MARCH_Z196 335config FORCE_MAX_ZONEORDER
300 bool "IBM zEnterprise 114 and 196" 336 int
301 select HAVE_MARCH_Z196_FEATURES if 64BIT 337 default "9"
302 help
303 Select this to enable optimizations for IBM zEnterprise 114 and 196
304 (2818 and 2817 series). The kernel will be slightly faster but will
305 not work on older machines.
306 338
307endchoice 339source "mm/Kconfig"
308 340
309config PACK_STACK 341config PACK_STACK
310 def_bool y 342 def_bool y
@@ -368,34 +400,9 @@ config WARN_DYNAMIC_STACK
368 400
369 Say N if you are unsure. 401 Say N if you are unsure.
370 402
371comment "Kernel preemption" 403endmenu
372
373source "kernel/Kconfig.preempt"
374
375config ARCH_SPARSEMEM_ENABLE
376 def_bool y
377 select SPARSEMEM_VMEMMAP_ENABLE
378 select SPARSEMEM_VMEMMAP
379 select SPARSEMEM_STATIC if !64BIT
380
381config ARCH_SPARSEMEM_DEFAULT
382 def_bool y
383
384config ARCH_SELECT_MEMORY_MODEL
385 def_bool y
386
387config ARCH_ENABLE_MEMORY_HOTPLUG
388 def_bool y if SPARSEMEM
389
390config ARCH_ENABLE_MEMORY_HOTREMOVE
391 def_bool y
392
393config ARCH_HIBERNATION_POSSIBLE
394 def_bool y if 64BIT
395
396source "mm/Kconfig"
397 404
398comment "I/O subsystem configuration" 405menu "I/O subsystem"
399 406
400config QDIO 407config QDIO
401 def_tristate y 408 def_tristate y
@@ -426,13 +433,102 @@ config CHSC_SCH
426 433
427 If unsure, say N. 434 If unsure, say N.
428 435
429comment "Misc" 436config SCM_BUS
437 def_bool y
438 depends on 64BIT
439 prompt "SCM bus driver"
440 help
441 Bus driver for Storage Class Memory.
442
443config EADM_SCH
444 def_tristate m
445 prompt "Support for EADM subchannels"
446 depends on SCM_BUS
447 help
448 This driver allows usage of EADM subchannels. EADM subchannels act
449 as a communication vehicle for SCM increments.
450
451 To compile this driver as a module, choose M here: the
452 module will be called eadm_sch.
453
454endmenu
455
456menu "Dump support"
457
458config CRASH_DUMP
459 bool "kernel crash dumps"
460 depends on 64BIT && SMP
461 select KEXEC
462 help
463 Generate crash dump after being started by kexec.
464 Crash dump kernels are loaded in the main kernel with kexec-tools
465 into a specially reserved region and then later executed after
466 a crash by kdump/kexec.
467 For more details see Documentation/kdump/kdump.txt
468
469config ZFCPDUMP
470 def_bool n
471 prompt "zfcpdump support"
472 select SMP
473 help
474 Select this option if you want to build an zfcpdump enabled kernel.
475 Refer to <file:Documentation/s390/zfcpdump.txt> for more details on this.
476
477endmenu
478
479menu "Executable file formats / Emulations"
430 480
431source "fs/Kconfig.binfmt" 481source "fs/Kconfig.binfmt"
432 482
433config FORCE_MAX_ZONEORDER 483config SECCOMP
434 int 484 def_bool y
435 default "9" 485 prompt "Enable seccomp to safely compute untrusted bytecode"
486 depends on PROC_FS
487 help
488 This kernel feature is useful for number crunching applications
489 that may need to compute untrusted bytecode during their
490 execution. By using pipes or other transports made available to
491 the process as file descriptors supporting the read/write
492 syscalls, it's possible to isolate those applications in
493 their own address space using seccomp. Once seccomp is
494 enabled via /proc/<pid>/seccomp, it cannot be disabled
495 and the task is only allowed to execute a few safe syscalls
496 defined by each seccomp mode.
497
498 If unsure, say Y.
499
500endmenu
501
502menu "Power Management"
503
504config ARCH_HIBERNATION_POSSIBLE
505 def_bool y if 64BIT
506
507source "kernel/power/Kconfig"
508
509endmenu
510
511source "net/Kconfig"
512
513config PCMCIA
514 def_bool n
515
516config CCW
517 def_bool y
518
519source "drivers/Kconfig"
520
521source "fs/Kconfig"
522
523source "arch/s390/Kconfig.debug"
524
525source "security/Kconfig"
526
527source "crypto/Kconfig"
528
529source "lib/Kconfig"
530
531menu "Virtualization"
436 532
437config PFAULT 533config PFAULT
438 def_bool y 534 def_bool y
@@ -448,8 +544,8 @@ config PFAULT
448 this option. 544 this option.
449 545
450config SHARED_KERNEL 546config SHARED_KERNEL
451 def_bool y 547 bool "VM shared kernel support"
452 prompt "VM shared kernel support" 548 depends on !JUMP_LABEL
453 help 549 help
454 Select this option, if you want to share the text segment of the 550 Select this option, if you want to share the text segment of the
455 Linux kernel between different VM guests. This reduces memory 551 Linux kernel between different VM guests. This reduces memory
@@ -544,8 +640,6 @@ config APPLDATA_NET_SUM
544 This can also be compiled as a module, which will be called 640 This can also be compiled as a module, which will be called
545 appldata_net_sum.o. 641 appldata_net_sum.o.
546 642
547source kernel/Kconfig.hz
548
549config S390_HYPFS_FS 643config S390_HYPFS_FS
550 def_bool y 644 def_bool y
551 prompt "s390 hypervisor file system support" 645 prompt "s390 hypervisor file system support"
@@ -554,90 +648,21 @@ config S390_HYPFS_FS
554 This is a virtual file system intended to provide accounting 648 This is a virtual file system intended to provide accounting
555 information in an s390 hypervisor environment. 649 information in an s390 hypervisor environment.
556 650
557config KEXEC 651source "arch/s390/kvm/Kconfig"
558 def_bool n
559 prompt "kexec system call"
560 help
561 kexec is a system call that implements the ability to shutdown your
562 current kernel, and to start another kernel. It is like a reboot
563 but is independent of hardware/microcode support.
564
565config CRASH_DUMP
566 bool "kernel crash dumps"
567 depends on 64BIT && SMP
568 select KEXEC
569 help
570 Generate crash dump after being started by kexec.
571 Crash dump kernels are loaded in the main kernel with kexec-tools
572 into a specially reserved region and then later executed after
573 a crash by kdump/kexec.
574 For more details see Documentation/kdump/kdump.txt
575
576config ZFCPDUMP
577 def_bool n
578 prompt "zfcpdump support"
579 select SMP
580 help
581 Select this option if you want to build an zfcpdump enabled kernel.
582 Refer to <file:Documentation/s390/zfcpdump.txt> for more details on this.
583 652
584config S390_GUEST 653config S390_GUEST
585 def_bool y 654 def_bool y
586 prompt "s390 guest support for KVM (EXPERIMENTAL)" 655 prompt "s390 support for virtio devices (EXPERIMENTAL)"
587 depends on 64BIT && EXPERIMENTAL 656 depends on 64BIT && EXPERIMENTAL
588 select VIRTUALIZATION 657 select VIRTUALIZATION
589 select VIRTIO 658 select VIRTIO
590 select VIRTIO_RING 659 select VIRTIO_RING
591 select VIRTIO_CONSOLE 660 select VIRTIO_CONSOLE
592 help 661 help
593 Select this option if you want to run the kernel as a guest under 662 Enabling this option adds support for virtio based paravirtual device
594 the KVM hypervisor. This will add detection for KVM as well as a 663 drivers on s390.
595 virtio transport. If KVM is detected, the virtio console will be
596 the default console.
597
598config SECCOMP
599 def_bool y
600 prompt "Enable seccomp to safely compute untrusted bytecode"
601 depends on PROC_FS
602 help
603 This kernel feature is useful for number crunching applications
604 that may need to compute untrusted bytecode during their
605 execution. By using pipes or other transports made available to
606 the process as file descriptors supporting the read/write
607 syscalls, it's possible to isolate those applications in
608 their own address space using seccomp. Once seccomp is
609 enabled via /proc/<pid>/seccomp, it cannot be disabled
610 and the task is only allowed to execute a few safe syscalls
611 defined by each seccomp mode.
612
613 If unsure, say Y.
614
615endmenu
616 664
617menu "Power Management" 665 Select this option if you want to run the kernel as a guest under
618 666 the KVM hypervisor.
619source "kernel/power/Kconfig"
620 667
621endmenu 668endmenu
622
623source "net/Kconfig"
624
625config PCMCIA
626 def_bool n
627
628config CCW
629 def_bool y
630
631source "drivers/Kconfig"
632
633source "fs/Kconfig"
634
635source "arch/s390/Kconfig.debug"
636
637source "security/Kconfig"
638
639source "crypto/Kconfig"
640
641source "lib/Kconfig"
642
643source "arch/s390/kvm/Kconfig"
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 10e22c4ec4a7..3ad8f61c9985 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -11,6 +11,7 @@ targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \
 	  sizes.h head$(BITS).o
 
 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
+KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 KBUILD_CFLAGS += $(cflags-y)
 KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 465eca756feb..c4c6a1cf221b 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -71,34 +71,37 @@ void *memset(void *s, int c, size_t n)
71{ 71{
72 char *xs; 72 char *xs;
73 73
74 if (c == 0) 74 xs = s;
75 return __builtin_memset(s, 0, n); 75 while (n--)
76 76 *xs++ = c;
77 xs = (char *) s;
78 if (n > 0)
79 do {
80 *xs++ = c;
81 } while (--n > 0);
82 return s; 77 return s;
83} 78}
84 79
85void *memcpy(void *__dest, __const void *__src, size_t __n) 80void *memcpy(void *dest, const void *src, size_t n)
86{ 81{
87 return __builtin_memcpy(__dest, __src, __n); 82 const char *s = src;
83 char *d = dest;
84
85 while (n--)
86 *d++ = *s++;
87 return dest;
88} 88}
89 89
90void *memmove(void *__dest, __const void *__src, size_t __n) 90void *memmove(void *dest, const void *src, size_t n)
91{ 91{
92 char *d; 92 const char *s = src;
93 const char *s; 93 char *d = dest;
94 94
95 if (__dest <= __src) 95 if (d <= s) {
96 return __builtin_memcpy(__dest, __src, __n); 96 while (n--)
97 d = __dest + __n; 97 *d++ = *s++;
98 s = __src + __n; 98 } else {
99 while (__n--) 99 d += n;
100 *--d = *--s; 100 s += n;
101 return __dest; 101 while (n--)
102 *--d = *--s;
103 }
104 return dest;
102} 105}
103 106
104static void error(char *x) 107static void error(char *x)
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index f39cd710980b..b74400e3e035 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -16,8 +16,8 @@ CONFIG_CGROUPS=y
16CONFIG_CPUSETS=y 16CONFIG_CPUSETS=y
17CONFIG_CGROUP_CPUACCT=y 17CONFIG_CGROUP_CPUACCT=y
18CONFIG_RESOURCE_COUNTERS=y 18CONFIG_RESOURCE_COUNTERS=y
19CONFIG_CGROUP_MEMCG=y 19CONFIG_MEMCG=y
20CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y 20CONFIG_MEMCG_SWAP=y
21CONFIG_CGROUP_SCHED=y 21CONFIG_CGROUP_SCHED=y
22CONFIG_RT_GROUP_SCHED=y 22CONFIG_RT_GROUP_SCHED=y
23CONFIG_BLK_CGROUP=y 23CONFIG_BLK_CGROUP=y
@@ -32,20 +32,19 @@ CONFIG_EXPERT=y
32CONFIG_PROFILING=y 32CONFIG_PROFILING=y
33CONFIG_OPROFILE=y 33CONFIG_OPROFILE=y
34CONFIG_KPROBES=y 34CONFIG_KPROBES=y
35CONFIG_JUMP_LABEL=y
35CONFIG_MODULES=y 36CONFIG_MODULES=y
36CONFIG_MODULE_UNLOAD=y 37CONFIG_MODULE_UNLOAD=y
37CONFIG_MODVERSIONS=y 38CONFIG_MODVERSIONS=y
38CONFIG_PARTITION_ADVANCED=y 39CONFIG_PARTITION_ADVANCED=y
39CONFIG_IBM_PARTITION=y 40CONFIG_IBM_PARTITION=y
40CONFIG_DEFAULT_DEADLINE=y 41CONFIG_DEFAULT_DEADLINE=y
41CONFIG_PREEMPT=y 42CONFIG_HZ_100=y
42CONFIG_MEMORY_HOTPLUG=y 43CONFIG_MEMORY_HOTPLUG=y
43CONFIG_MEMORY_HOTREMOVE=y 44CONFIG_MEMORY_HOTREMOVE=y
44CONFIG_KSM=y 45CONFIG_KSM=y
45CONFIG_BINFMT_MISC=m
46CONFIG_CMM=m
47CONFIG_HZ_100=y
48CONFIG_CRASH_DUMP=y 46CONFIG_CRASH_DUMP=y
47CONFIG_BINFMT_MISC=m
49CONFIG_HIBERNATION=y 48CONFIG_HIBERNATION=y
50CONFIG_PACKET=y 49CONFIG_PACKET=y
51CONFIG_UNIX=y 50CONFIG_UNIX=y
@@ -75,6 +74,7 @@ CONFIG_NET_CLS_RSVP=m
75CONFIG_NET_CLS_RSVP6=m 74CONFIG_NET_CLS_RSVP6=m
76CONFIG_NET_CLS_ACT=y 75CONFIG_NET_CLS_ACT=y
77CONFIG_NET_ACT_POLICE=y 76CONFIG_NET_ACT_POLICE=y
77CONFIG_BPF_JIT=y
78CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 78CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
79CONFIG_DEVTMPFS=y 79CONFIG_DEVTMPFS=y
80CONFIG_BLK_DEV_LOOP=m 80CONFIG_BLK_DEV_LOOP=m
@@ -121,7 +121,6 @@ CONFIG_DEBUG_NOTIFIERS=y
121CONFIG_RCU_TRACE=y 121CONFIG_RCU_TRACE=y
122CONFIG_KPROBES_SANITY_TEST=y 122CONFIG_KPROBES_SANITY_TEST=y
123CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y 123CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
124CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
125CONFIG_LATENCYTOP=y 124CONFIG_LATENCYTOP=y
126CONFIG_DEBUG_PAGEALLOC=y 125CONFIG_DEBUG_PAGEALLOC=y
127CONFIG_BLK_DEV_IO_TRACE=y 126CONFIG_BLK_DEV_IO_TRACE=y
@@ -173,3 +172,4 @@ CONFIG_CRYPTO_SHA512_S390=m
173CONFIG_CRYPTO_DES_S390=m 172CONFIG_CRYPTO_DES_S390=m
174CONFIG_CRYPTO_AES_S390=m 173CONFIG_CRYPTO_AES_S390=m
175CONFIG_CRC7=m 174CONFIG_CRC7=m
175CONFIG_CMM=m
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 6767b437a103..06ea69bd387a 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -31,8 +31,8 @@ static struct dentry *hypfs_create_update_file(struct super_block *sb,
31 struct dentry *dir); 31 struct dentry *dir);
32 32
33struct hypfs_sb_info { 33struct hypfs_sb_info {
34 uid_t uid; /* uid used for files and dirs */ 34 kuid_t uid; /* uid used for files and dirs */
35 gid_t gid; /* gid used for files and dirs */ 35 kgid_t gid; /* gid used for files and dirs */
36 struct dentry *update_file; /* file to trigger update */ 36 struct dentry *update_file; /* file to trigger update */
37 time_t last_update; /* last update time in secs since 1970 */ 37 time_t last_update; /* last update time in secs since 1970 */
38 struct mutex lock; /* lock to protect update process */ 38 struct mutex lock; /* lock to protect update process */
@@ -72,8 +72,6 @@ static void hypfs_remove(struct dentry *dentry)
72 struct dentry *parent; 72 struct dentry *parent;
73 73
74 parent = dentry->d_parent; 74 parent = dentry->d_parent;
75 if (!parent || !parent->d_inode)
76 return;
77 mutex_lock(&parent->d_inode->i_mutex); 75 mutex_lock(&parent->d_inode->i_mutex);
78 if (hypfs_positive(dentry)) { 76 if (hypfs_positive(dentry)) {
79 if (S_ISDIR(dentry->d_inode->i_mode)) 77 if (S_ISDIR(dentry->d_inode->i_mode))
@@ -229,6 +227,8 @@ static int hypfs_parse_options(char *options, struct super_block *sb)
229{ 227{
230 char *str; 228 char *str;
231 substring_t args[MAX_OPT_ARGS]; 229 substring_t args[MAX_OPT_ARGS];
230 kuid_t uid;
231 kgid_t gid;
232 232
233 if (!options) 233 if (!options)
234 return 0; 234 return 0;
@@ -243,12 +243,18 @@ static int hypfs_parse_options(char *options, struct super_block *sb)
243 case opt_uid: 243 case opt_uid:
244 if (match_int(&args[0], &option)) 244 if (match_int(&args[0], &option))
245 return -EINVAL; 245 return -EINVAL;
246 hypfs_info->uid = option; 246 uid = make_kuid(current_user_ns(), option);
247 if (!uid_valid(uid))
248 return -EINVAL;
249 hypfs_info->uid = uid;
247 break; 250 break;
248 case opt_gid: 251 case opt_gid:
249 if (match_int(&args[0], &option)) 252 if (match_int(&args[0], &option))
250 return -EINVAL; 253 return -EINVAL;
251 hypfs_info->gid = option; 254 gid = make_kgid(current_user_ns(), option);
255 if (!gid_valid(gid))
256 return -EINVAL;
257 hypfs_info->gid = gid;
252 break; 258 break;
253 case opt_err: 259 case opt_err:
254 default: 260 default:
@@ -263,8 +269,8 @@ static int hypfs_show_options(struct seq_file *s, struct dentry *root)
263{ 269{
264 struct hypfs_sb_info *hypfs_info = root->d_sb->s_fs_info; 270 struct hypfs_sb_info *hypfs_info = root->d_sb->s_fs_info;
265 271
266 seq_printf(s, ",uid=%u", hypfs_info->uid); 272 seq_printf(s, ",uid=%u", from_kuid_munged(&init_user_ns, hypfs_info->uid));
267 seq_printf(s, ",gid=%u", hypfs_info->gid); 273 seq_printf(s, ",gid=%u", from_kgid_munged(&init_user_ns, hypfs_info->gid));
268 return 0; 274 return 0;
269} 275}
270 276
diff --git a/arch/s390/include/asm/appldata.h b/arch/s390/include/asm/appldata.h
index f328294faeae..32a705987156 100644
--- a/arch/s390/include/asm/appldata.h
+++ b/arch/s390/include/asm/appldata.h
@@ -70,7 +70,7 @@ static inline int appldata_asm(struct appldata_product_id *id,
 	int ry;
 
 	if (!MACHINE_IS_VM)
-		return -ENOSYS;
+		return -EOPNOTSUPP;
 	parm_list.diag = 0xdc;
 	parm_list.function = fn;
 	parm_list.parlist_length = sizeof(parm_list);
diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h
index bf115b49f444..aea451fd182e 100644
--- a/arch/s390/include/asm/chsc.h
+++ b/arch/s390/include/asm/chsc.h
@@ -125,32 +125,4 @@ struct chsc_cpd_info {
 #define CHSC_INFO_CPD _IOWR(CHSC_IOCTL_MAGIC, 0x87, struct chsc_cpd_info)
 #define CHSC_INFO_DCAL _IOWR(CHSC_IOCTL_MAGIC, 0x88, struct chsc_dcal)
 
-#ifdef __KERNEL__
-
-struct css_general_char {
-	u64 : 12;
-	u32 dynio : 1;	 /* bit 12 */
-	u32 : 28;
-	u32 aif : 1;	 /* bit 41 */
-	u32 : 3;
-	u32 mcss : 1;	 /* bit 45 */
-	u32 fcs : 1;	 /* bit 46 */
-	u32 : 1;
-	u32 ext_mb : 1;	 /* bit 48 */
-	u32 : 7;
-	u32 aif_tdd : 1; /* bit 56 */
-	u32 : 1;
-	u32 qebsm : 1;	 /* bit 58 */
-	u32 : 8;
-	u32 aif_osa : 1; /* bit 67 */
-	u32 : 14;
-	u32 cib : 1;	 /* bit 82 */
-	u32 : 5;
-	u32 fcx : 1;	 /* bit 88 */
-	u32 : 7;
-}__attribute__((packed));
-
-extern struct css_general_char css_general_characteristics;
-
-#endif /* __KERNEL__ */
 #endif
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 77043aa44d67..55bde6035216 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -80,6 +80,18 @@ struct erw {
80} __attribute__ ((packed)); 80} __attribute__ ((packed));
81 81
82/** 82/**
83 * struct erw_eadm - EADM Subchannel extended report word
84 * @b: aob error
85 * @r: arsb error
86 */
87struct erw_eadm {
88 __u32 : 16;
89 __u32 b : 1;
90 __u32 r : 1;
91 __u32 : 14;
92} __packed;
93
94/**
83 * struct sublog - subchannel logout area 95 * struct sublog - subchannel logout area
84 * @res0: reserved 96 * @res0: reserved
85 * @esf: extended status flags 97 * @esf: extended status flags
@@ -170,9 +182,22 @@ struct esw3 {
170} __attribute__ ((packed)); 182} __attribute__ ((packed));
171 183
172/** 184/**
185 * struct esw_eadm - EADM Subchannel Extended Status Word (ESW)
186 * @sublog: subchannel logout
187 * @erw: extended report word
188 */
189struct esw_eadm {
190 __u32 sublog;
191 struct erw_eadm erw;
192 __u32 : 32;
193 __u32 : 32;
194 __u32 : 32;
195} __packed;
196
197/**
173 * struct irb - interruption response block 198 * struct irb - interruption response block
174 * @scsw: subchannel status word 199 * @scsw: subchannel status word
175 * @esw: extened status word, 4 formats 200 * @esw: extened status word
176 * @ecw: extended control word 201 * @ecw: extended control word
177 * 202 *
178 * The irb that is handed to the device driver when an interrupt occurs. For 203 * The irb that is handed to the device driver when an interrupt occurs. For
@@ -191,6 +216,7 @@ struct irb {
191 struct esw1 esw1; 216 struct esw1 esw1;
192 struct esw2 esw2; 217 struct esw2 esw2;
193 struct esw3 esw3; 218 struct esw3 esw3;
219 struct esw_eadm eadm;
194 } esw; 220 } esw;
195 __u8 ecw[32]; 221 __u8 ecw[32];
196} __attribute__ ((packed,aligned(4))); 222} __attribute__ ((packed,aligned(4)));
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 8d798e962b63..0f636cbdf342 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -7,7 +7,9 @@
7#ifndef __ASM_CMPXCHG_H 7#ifndef __ASM_CMPXCHG_H
8#define __ASM_CMPXCHG_H 8#define __ASM_CMPXCHG_H
9 9
10#include <linux/mmdebug.h>
10#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/bug.h>
11 13
12extern void __xchg_called_with_bad_pointer(void); 14extern void __xchg_called_with_bad_pointer(void);
13 15
@@ -203,6 +205,65 @@ static inline unsigned long long __cmpxchg64(void *ptr,
203}) 205})
204#endif /* CONFIG_64BIT */ 206#endif /* CONFIG_64BIT */
205 207
208#define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn) \
209({ \
210 register __typeof__(*(p1)) __old1 asm("2") = (o1); \
211 register __typeof__(*(p2)) __old2 asm("3") = (o2); \
212 register __typeof__(*(p1)) __new1 asm("4") = (n1); \
213 register __typeof__(*(p2)) __new2 asm("5") = (n2); \
214 int cc; \
215 asm volatile( \
216 insn " %[old],%[new],%[ptr]\n" \
217 " ipm %[cc]\n" \
218 " srl %[cc],28" \
219 : [cc] "=d" (cc), [old] "+d" (__old1), "+d" (__old2) \
220 : [new] "d" (__new1), "d" (__new2), \
221 [ptr] "Q" (*(p1)), "Q" (*(p2)) \
222 : "memory", "cc"); \
223 !cc; \
224})
225
226#define __cmpxchg_double_4(p1, p2, o1, o2, n1, n2) \
227 __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, "cds")
228
229#define __cmpxchg_double_8(p1, p2, o1, o2, n1, n2) \
230 __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, "cdsg")
231
232extern void __cmpxchg_double_called_with_bad_pointer(void);
233
234#define __cmpxchg_double(p1, p2, o1, o2, n1, n2) \
235({ \
236 int __ret; \
237 switch (sizeof(*(p1))) { \
238 case 4: \
239 __ret = __cmpxchg_double_4(p1, p2, o1, o2, n1, n2); \
240 break; \
241 case 8: \
242 __ret = __cmpxchg_double_8(p1, p2, o1, o2, n1, n2); \
243 break; \
244 default: \
245 __cmpxchg_double_called_with_bad_pointer(); \
246 } \
247 __ret; \
248})
249
250#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
251({ \
252 __typeof__(p1) __p1 = (p1); \
253 __typeof__(p2) __p2 = (p2); \
254 int __ret; \
255 BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \
256 BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \
257 VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\
258 if (sizeof(long) == 4) \
259 __ret = __cmpxchg_double_4(__p1, __p2, o1, o2, n1, n2); \
260 else \
261 __ret = __cmpxchg_double_8(__p1, __p2, o1, o2, n1, n2); \
262 __ret; \
263})
264
265#define system_has_cmpxchg_double() 1
266
206#include <asm-generic/cmpxchg-local.h> 267#include <asm-generic/cmpxchg-local.h>
207 268
208static inline unsigned long __cmpxchg_local(void *ptr, 269static inline unsigned long __cmpxchg_local(void *ptr,
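The cmpxchg_double() macro added above compares and swaps two adjacent machine words with a single cds/cdsg instruction and evaluates to nonzero when both old values still matched. A minimal usage sketch follows; it is not from this commit, the structure and function names are invented, and it assumes the pair is naturally aligned for the doubleword compare-and-swap.

/* Hypothetical example using the cmpxchg_double() macro defined above. */
struct tagged_ref {
	unsigned long ptr;	/* first word of the pair */
	unsigned long tag;	/* must be the immediately following word */
} __attribute__((aligned(2 * sizeof(unsigned long))));

static inline int tagged_ref_update(struct tagged_ref *r,
				    unsigned long old_ptr, unsigned long old_tag,
				    unsigned long new_ptr)
{
	/* Both words are checked and replaced atomically; the tag acts as
	 * an ABA-avoidance generation counter.  Returns nonzero on success.
	 */
	return cmpxchg_double(&r->ptr, &r->tag,
			      old_ptr, old_tag,
			      new_ptr, old_tag + 1);
}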
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index a3afecdae145..35f0020b7ba7 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -21,11 +21,15 @@
 #define CPU_MF_INT_SF_LSDA	(1 << 22)	/* loss of sample data alert */
 #define CPU_MF_INT_CF_CACA	(1 << 7)	/* counter auth. change alert */
 #define CPU_MF_INT_CF_LCDA	(1 << 6)	/* loss of counter data alert */
+#define CPU_MF_INT_RI_HALTED	(1 << 5)	/* run-time instr. halted */
+#define CPU_MF_INT_RI_BUF_FULL	(1 << 4)	/* run-time instr. program
+						   buffer full */
 
 #define CPU_MF_INT_CF_MASK	(CPU_MF_INT_CF_CACA|CPU_MF_INT_CF_LCDA)
 #define CPU_MF_INT_SF_MASK	(CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE| \
 				 CPU_MF_INT_SF_PRA|CPU_MF_INT_SF_SACA| \
 				 CPU_MF_INT_SF_LSDA)
+#define CPU_MF_INT_RI_MASK	(CPU_MF_INT_RI_HALTED|CPU_MF_INT_RI_BUF_FULL)
 
 /* CPU measurement facility support */
 static inline int cpum_cf_avail(void)
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 8709bdef233c..023d5ae24482 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -12,6 +12,9 @@
 #include <linux/spinlock.h>
 #include <asm/div64.h>
 
+
+#define __ARCH_HAS_VTIME_ACCOUNT
+
 /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
 
 typedef unsigned long long __nocast cputime_t;
diff --git a/arch/s390/include/asm/css_chars.h b/arch/s390/include/asm/css_chars.h
new file mode 100644
index 000000000000..a06ebc2623fb
--- /dev/null
+++ b/arch/s390/include/asm/css_chars.h
@@ -0,0 +1,39 @@
1#ifndef _ASM_CSS_CHARS_H
2#define _ASM_CSS_CHARS_H
3
4#include <linux/types.h>
5
6#ifdef __KERNEL__
7
8struct css_general_char {
9 u64 : 12;
10 u32 dynio : 1; /* bit 12 */
11 u32 : 4;
12 u32 eadm : 1; /* bit 17 */
13 u32 : 23;
14 u32 aif : 1; /* bit 41 */
15 u32 : 3;
16 u32 mcss : 1; /* bit 45 */
17 u32 fcs : 1; /* bit 46 */
18 u32 : 1;
19 u32 ext_mb : 1; /* bit 48 */
20 u32 : 7;
21 u32 aif_tdd : 1; /* bit 56 */
22 u32 : 1;
23 u32 qebsm : 1; /* bit 58 */
24 u32 : 8;
25 u32 aif_osa : 1; /* bit 67 */
26 u32 : 12;
27 u32 eadm_rf : 1; /* bit 80 */
28 u32 : 1;
29 u32 cib : 1; /* bit 82 */
30 u32 : 5;
31 u32 fcx : 1; /* bit 88 */
32 u32 : 19;
33 u32 alt_ssi : 1; /* bit 108 */
34} __packed;
35
36extern struct css_general_char css_general_characteristics;
37
38#endif /* __KERNEL__ */
39#endif
diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h
new file mode 100644
index 000000000000..8d4847191ecc
--- /dev/null
+++ b/arch/s390/include/asm/eadm.h
@@ -0,0 +1,124 @@
1#ifndef _ASM_S390_EADM_H
2#define _ASM_S390_EADM_H
3
4#include <linux/types.h>
5#include <linux/device.h>
6
7struct arqb {
8 u64 data;
9 u16 fmt:4;
10 u16:12;
11 u16 cmd_code;
12 u16:16;
13 u16 msb_count;
14 u32 reserved[12];
15} __packed;
16
17#define ARQB_CMD_MOVE 1
18
19struct arsb {
20 u16 fmt:4;
21 u32:28;
22 u8 ef;
23 u8:8;
24 u8 ecbi;
25 u8:8;
26 u8 fvf;
27 u16:16;
28 u8 eqc;
29 u32:32;
30 u64 fail_msb;
31 u64 fail_aidaw;
32 u64 fail_ms;
33 u64 fail_scm;
34 u32 reserved[4];
35} __packed;
36
37struct msb {
38 u8 fmt:4;
39 u8 oc:4;
40 u8 flags;
41 u16:12;
42 u16 bs:4;
43 u32 blk_count;
44 u64 data_addr;
45 u64 scm_addr;
46 u64:64;
47} __packed;
48
49struct aidaw {
50 u8 flags;
51 u32 :24;
52 u32 :32;
53 u64 data_addr;
54} __packed;
55
56#define MSB_OC_CLEAR 0
57#define MSB_OC_READ 1
58#define MSB_OC_WRITE 2
59#define MSB_OC_RELEASE 3
60
61#define MSB_FLAG_BNM 0x80
62#define MSB_FLAG_IDA 0x40
63
64#define MSB_BS_4K 0
65#define MSB_BS_1M 1
66
67#define AOB_NR_MSB 124
68
69struct aob {
70 struct arqb request;
71 struct arsb response;
72 struct msb msb[AOB_NR_MSB];
73} __packed __aligned(PAGE_SIZE);
74
75struct aob_rq_header {
76 struct scm_device *scmdev;
77 char data[0];
78};
79
80struct scm_device {
81 u64 address;
82 u64 size;
83 unsigned int nr_max_block;
84 struct device dev;
85 struct {
86 unsigned int persistence:4;
87 unsigned int oper_state:4;
88 unsigned int data_state:4;
89 unsigned int rank:4;
90 unsigned int release:1;
91 unsigned int res_id:8;
92 } __packed attrs;
93};
94
95#define OP_STATE_GOOD 1
96#define OP_STATE_TEMP_ERR 2
97#define OP_STATE_PERM_ERR 3
98
99struct scm_driver {
100 struct device_driver drv;
101 int (*probe) (struct scm_device *scmdev);
102 int (*remove) (struct scm_device *scmdev);
103 void (*notify) (struct scm_device *scmdev);
104 void (*handler) (struct scm_device *scmdev, void *data, int error);
105};
106
107int scm_driver_register(struct scm_driver *scmdrv);
108void scm_driver_unregister(struct scm_driver *scmdrv);
109
110int scm_start_aob(struct aob *aob);
111void scm_irq_handler(struct aob *aob, int error);
112
113struct eadm_ops {
114 int (*eadm_start) (struct aob *aob);
115 struct module *owner;
116};
117
118int scm_get_ref(void);
119void scm_put_ref(void);
120
121void register_eadm_ops(struct eadm_ops *ops);
122void unregister_eadm_ops(struct eadm_ops *ops);
123
124#endif /* _ASM_S390_EADM_H */
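The new eadm.h above declares a small bus/driver interface for Storage Class Memory. As a rough, hypothetical sketch of how a client might plug into it (only struct scm_driver, scm_start_aob(), scm_driver_register() and scm_driver_unregister() come from the header; every other name below is made up):

/* Hypothetical SCM client driver skeleton built on the interface above. */
static int example_scm_probe(struct scm_device *scmdev)
{
	return 0;		/* claim the SCM increment */
}

static int example_scm_remove(struct scm_device *scmdev)
{
	return 0;
}

static void example_scm_handler(struct scm_device *scmdev, void *data, int error)
{
	/* called when an AOB previously submitted via scm_start_aob() completes */
}

static struct scm_driver example_scm_driver = {
	.drv = {
		.name	= "example_scm",	/* made-up name */
		.owner	= THIS_MODULE,
	},
	.probe		= example_scm_probe,
	.remove		= example_scm_remove,
	.handler	= example_scm_handler,
};

static int __init example_scm_init(void)
{
	return scm_driver_register(&example_scm_driver);
}

static void __exit example_scm_exit(void)
{
	scm_driver_unregister(&example_scm_driver);
}

module_init(example_scm_init);
module_exit(example_scm_exit);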
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 9b94a160fe7f..178ff966a8ba 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -101,6 +101,7 @@
 #define HWCAP_S390_HPAGE	128
 #define HWCAP_S390_ETF3EH	256
 #define HWCAP_S390_HIGH_GPRS	512
+#define HWCAP_S390_TE		1024
 
 /*
  * These are used to set parameters in the core dumps.
@@ -212,4 +213,6 @@ int arch_setup_additional_pages(struct linux_binprm *, int);
 extern unsigned long arch_randomize_brk(struct mm_struct *mm);
 #define arch_randomize_brk arch_randomize_brk
 
+void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
+
 #endif
diff --git a/arch/s390/include/asm/etr.h b/arch/s390/include/asm/etr.h
index a24b03b9fb64..629b79a93165 100644
--- a/arch/s390/include/asm/etr.h
+++ b/arch/s390/include/asm/etr.h
@@ -140,7 +140,7 @@ struct etr_ptff_qto {
 /* Inline assembly helper functions */
 static inline int etr_setr(struct etr_eacr *ctrl)
 {
-	int rc = -ENOSYS;
+	int rc = -EOPNOTSUPP;
 
 	asm volatile(
 		" .insn s,0xb2160000,%1\n"
@@ -154,7 +154,7 @@ static inline int etr_setr(struct etr_eacr *ctrl)
 /* Stores a format 1 aib with 64 bytes */
 static inline int etr_stetr(struct etr_aib *aib)
 {
-	int rc = -ENOSYS;
+	int rc = -EOPNOTSUPP;
 
 	asm volatile(
 		" .insn s,0xb2170000,%1\n"
@@ -169,7 +169,7 @@ static inline int etr_stetr(struct etr_aib *aib)
 static inline int etr_steai(struct etr_aib *aib, unsigned int func)
 {
 	register unsigned int reg0 asm("0") = func;
-	int rc = -ENOSYS;
+	int rc = -EOPNOTSUPP;
 
 	asm volatile(
 		" .insn s,0xb2b30000,%1\n"
@@ -190,7 +190,7 @@ static inline int etr_ptff(void *ptff_block, unsigned int func)
 {
 	register unsigned int reg0 asm("0") = func;
 	register unsigned long reg1 asm("1") = (unsigned long) ptff_block;
-	int rc = -ENOSYS;
+	int rc = -EOPNOTSUPP;
 
 	asm volatile(
 		" .word 0x0104\n"
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 2b9d41899d21..6703dd986fd4 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -19,6 +19,7 @@ enum interruption_class {
 	EXTINT_IUC,
 	EXTINT_CMS,
 	EXTINT_CMC,
+	EXTINT_CMR,
 	IOINT_CIO,
 	IOINT_QAI,
 	IOINT_DAS,
@@ -30,6 +31,7 @@ enum interruption_class {
 	IOINT_CLW,
 	IOINT_CTC,
 	IOINT_APB,
+	IOINT_ADM,
 	IOINT_CSC,
 	NMI_NMI,
 	NR_IRQS,
diff --git a/arch/s390/include/asm/isc.h b/arch/s390/include/asm/isc.h
index 1420a1115948..5ae606456b0a 100644
--- a/arch/s390/include/asm/isc.h
+++ b/arch/s390/include/asm/isc.h
@@ -14,6 +14,7 @@
 /* Regular I/O interrupts. */
 #define IO_SCH_ISC 3		/* regular I/O subchannels */
 #define CONSOLE_ISC 1		/* console I/O subchannel */
+#define EADM_SCH_ISC 4		/* EADM subchannels */
 #define CHSC_SCH_ISC 7		/* CHSC subchannels */
 /* Adapter interrupts. */
 #define QDIO_AIRQ_ISC IO_SCH_ISC	/* I/O subchannel in qdio mode */
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index aab5555bbbda..bbf8141408cd 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -329,9 +329,13 @@ struct _lowcore {
 	__u8	pad_0x1338[0x1340-0x1338];	/* 0x1338 */
 	__u32	access_regs_save_area[16];	/* 0x1340 */
 	__u64	cregs_save_area[16];		/* 0x1380 */
+	__u8	pad_0x1400[0x1800-0x1400];	/* 0x1400 */
+
+	/* Transaction abort diagnostic block */
+	__u8	pgm_tdb[256];			/* 0x1800 */
 
 	/* align to the top of the prefix area */
-	__u8	pad_0x1400[0x2000-0x1400];	/* 0x1400 */
+	__u8	pad_0x1900[0x2000-0x1900];	/* 0x1900 */
 } __packed;
 
 #endif /* CONFIG_32BIT */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index b749c5733657..084e7755ed9b 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -57,7 +57,7 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 	pgd_t *pgd = mm->pgd;
 
 	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-	if (addressing_mode != HOME_SPACE_MODE) {
+	if (s390_user_mode != HOME_SPACE_MODE) {
 		/* Load primary space page table origin. */
 		asm volatile(LCTL_OPCODE" 1,1,%0\n"
 			     : : "m" (S390_lowcore.user_asce) );
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 6537e72e0853..86fe0ee2cee5 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -20,7 +20,7 @@
20#endif 20#endif
21 21
22#define arch_this_cpu_to_op(pcp, val, op) \ 22#define arch_this_cpu_to_op(pcp, val, op) \
23do { \ 23({ \
24 typedef typeof(pcp) pcp_op_T__; \ 24 typedef typeof(pcp) pcp_op_T__; \
25 pcp_op_T__ old__, new__, prev__; \ 25 pcp_op_T__ old__, new__, prev__; \
26 pcp_op_T__ *ptr__; \ 26 pcp_op_T__ *ptr__; \
@@ -39,13 +39,19 @@ do { \
39 } \ 39 } \
40 } while (prev__ != old__); \ 40 } while (prev__ != old__); \
41 preempt_enable(); \ 41 preempt_enable(); \
42} while (0) 42 new__; \
43})
43 44
44#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op(pcp, val, +) 45#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
45#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op(pcp, val, +) 46#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
46#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op(pcp, val, +) 47#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
47#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op(pcp, val, +) 48#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
48 49
50#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
51#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
52#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
53#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
54
49#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op(pcp, val, &) 55#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op(pcp, val, &)
50#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op(pcp, val, &) 56#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op(pcp, val, &)
51#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, &) 57#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, &)
@@ -61,7 +67,7 @@ do { \
61#define this_cpu_xor_4(pcp, val) arch_this_cpu_to_op(pcp, val, ^) 67#define this_cpu_xor_4(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
62#define this_cpu_xor_8(pcp, val) arch_this_cpu_to_op(pcp, val, ^) 68#define this_cpu_xor_8(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
63 69
64#define arch_this_cpu_cmpxchg(pcp, oval, nval) \ 70#define arch_this_cpu_cmpxchg(pcp, oval, nval) \
65({ \ 71({ \
66 typedef typeof(pcp) pcp_op_T__; \ 72 typedef typeof(pcp) pcp_op_T__; \
67 pcp_op_T__ ret__; \ 73 pcp_op_T__ ret__; \
@@ -84,6 +90,44 @@ do { \
84#define this_cpu_cmpxchg_4(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval) 90#define this_cpu_cmpxchg_4(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
85#define this_cpu_cmpxchg_8(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval) 91#define this_cpu_cmpxchg_8(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
86 92
93#define arch_this_cpu_xchg(pcp, nval) \
94({ \
95 typeof(pcp) *ptr__; \
96 typeof(pcp) ret__; \
97 preempt_disable(); \
98 ptr__ = __this_cpu_ptr(&(pcp)); \
99 ret__ = xchg(ptr__, nval); \
100 preempt_enable(); \
101 ret__; \
102})
103
104#define this_cpu_xchg_1(pcp, nval) arch_this_cpu_xchg(pcp, nval)
105#define this_cpu_xchg_2(pcp, nval) arch_this_cpu_xchg(pcp, nval)
106#define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
107#ifdef CONFIG_64BIT
108#define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)
109#endif
110
111#define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2) \
112({ \
113 typeof(pcp1) o1__ = (o1), n1__ = (n1); \
114 typeof(pcp2) o2__ = (o2), n2__ = (n2); \
115 typeof(pcp1) *p1__; \
116 typeof(pcp2) *p2__; \
117 int ret__; \
118 preempt_disable(); \
119 p1__ = __this_cpu_ptr(&(pcp1)); \
120 p2__ = __this_cpu_ptr(&(pcp2)); \
121 ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \
122 preempt_enable(); \
123 ret__; \
124})
125
126#define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
127#ifdef CONFIG_64BIT
128#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
129#endif
130
87#include <asm-generic/percpu.h> 131#include <asm-generic/percpu.h>
88 132
89#endif /* __ARCH_S390_PERCPU__ */ 133#endif /* __ARCH_S390_PERCPU__ */
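The percpu.h changes above wire the double compare-and-swap into the generic this_cpu_cmpxchg_double() operation (and add this_cpu_xchg and this_cpu_add_return variants). The sketch below is a hypothetical illustration of the per-CPU variant, not code from this commit; the variable and function names are invented, and the two fields must be adjacent, suitably aligned words.

/* Hypothetical per-CPU pair updated with this_cpu_cmpxchg_double(). */
struct pcp_pair {
	unsigned long head;
	unsigned long gen;
} __attribute__((aligned(2 * sizeof(unsigned long))));

static DEFINE_PER_CPU(struct pcp_pair, example_pair);

static int example_pair_push(unsigned long old_head, unsigned long old_gen,
			     unsigned long new_head)
{
	/* Succeeds (returns nonzero) only if this CPU's head/gen pair still
	 * holds the expected old values; the generation is bumped with it,
	 * preemption handling is done inside the accessor.
	 */
	return this_cpu_cmpxchg_double(example_pair.head, example_pair.gen,
				       old_head, old_gen,
				       new_head, old_gen + 1);
}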
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 11e4e3236937..f3e0aabfc6bc 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -11,12 +11,15 @@
11#ifndef __ASM_S390_PROCESSOR_H 11#ifndef __ASM_S390_PROCESSOR_H
12#define __ASM_S390_PROCESSOR_H 12#define __ASM_S390_PROCESSOR_H
13 13
14#ifndef __ASSEMBLY__
15
14#include <linux/linkage.h> 16#include <linux/linkage.h>
15#include <linux/irqflags.h> 17#include <linux/irqflags.h>
16#include <asm/cpu.h> 18#include <asm/cpu.h>
17#include <asm/page.h> 19#include <asm/page.h>
18#include <asm/ptrace.h> 20#include <asm/ptrace.h>
19#include <asm/setup.h> 21#include <asm/setup.h>
22#include <asm/runtime_instr.h>
20 23
21/* 24/*
22 * Default implementation of macro that returns current 25 * Default implementation of macro that returns current
@@ -75,11 +78,20 @@ struct thread_struct {
75 unsigned long gmap_addr; /* address of last gmap fault. */ 78 unsigned long gmap_addr; /* address of last gmap fault. */
76 struct per_regs per_user; /* User specified PER registers */ 79 struct per_regs per_user; /* User specified PER registers */
77 struct per_event per_event; /* Cause of the last PER trap */ 80 struct per_event per_event; /* Cause of the last PER trap */
81 unsigned long per_flags; /* Flags to control debug behavior */
78 /* pfault_wait is used to block the process on a pfault event */ 82 /* pfault_wait is used to block the process on a pfault event */
79 unsigned long pfault_wait; 83 unsigned long pfault_wait;
80 struct list_head list; 84 struct list_head list;
85 /* cpu runtime instrumentation */
86 struct runtime_instr_cb *ri_cb;
87 int ri_signum;
88#ifdef CONFIG_64BIT
89 unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
90#endif
81}; 91};
82 92
93#define PER_FLAG_NO_TE 1UL /* Flag to disable transactions. */
94
83typedef struct thread_struct thread_struct; 95typedef struct thread_struct thread_struct;
84 96
85/* 97/*
@@ -130,6 +142,12 @@ struct task_struct;
130struct mm_struct; 142struct mm_struct;
131struct seq_file; 143struct seq_file;
132 144
145#ifdef CONFIG_64BIT
146extern void show_cacheinfo(struct seq_file *m);
147#else
148static inline void show_cacheinfo(struct seq_file *m) { }
149#endif
150
133/* Free all resources held by a thread. */ 151/* Free all resources held by a thread. */
134extern void release_thread(struct task_struct *); 152extern void release_thread(struct task_struct *);
135extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); 153extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
@@ -140,6 +158,7 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
140extern unsigned long thread_saved_pc(struct task_struct *t); 158extern unsigned long thread_saved_pc(struct task_struct *t);
141 159
142extern void show_code(struct pt_regs *regs); 160extern void show_code(struct pt_regs *regs);
161extern void print_fn_code(unsigned char *code, unsigned long len);
143 162
144unsigned long get_wchan(struct task_struct *p); 163unsigned long get_wchan(struct task_struct *p);
145#define task_pt_regs(tsk) ((struct pt_regs *) \ 164#define task_pt_regs(tsk) ((struct pt_regs *) \
@@ -331,23 +350,6 @@ extern void (*s390_base_ext_handler_fn)(void);
331 350
332#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL 351#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
333 352
334/*
335 * Helper macro for exception table entries
336 */
337#ifndef CONFIG_64BIT
338#define EX_TABLE(_fault,_target) \
339 ".section __ex_table,\"a\"\n" \
340 " .align 4\n" \
341 " .long " #_fault "," #_target "\n" \
342 ".previous\n"
343#else
344#define EX_TABLE(_fault,_target) \
345 ".section __ex_table,\"a\"\n" \
346 " .align 8\n" \
347 " .quad " #_fault "," #_target "\n" \
348 ".previous\n"
349#endif
350
351extern int memcpy_real(void *, void *, size_t); 353extern int memcpy_real(void *, void *, size_t);
352extern void memcpy_absolute(void *, void *, size_t); 354extern void memcpy_absolute(void *, void *, size_t);
353 355
@@ -358,4 +360,25 @@ extern void memcpy_absolute(void *, void *, size_t);
358 memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \ 360 memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \
359} 361}
360 362
361#endif /* __ASM_S390_PROCESSOR_H */ 363/*
364 * Helper macro for exception table entries
365 */
366#define EX_TABLE(_fault, _target) \
367 ".section __ex_table,\"a\"\n" \
368 ".align 4\n" \
369 ".long (" #_fault ") - .\n" \
370 ".long (" #_target ") - .\n" \
371 ".previous\n"
372
373#else /* __ASSEMBLY__ */
374
375#define EX_TABLE(_fault, _target) \
376 .section __ex_table,"a" ; \
377 .align 4 ; \
378 .long (_fault) - . ; \
379 .long (_target) - . ; \
380 .previous
381
382#endif /* __ASSEMBLY__ */
383
384#endif /* __ASM_S390_PROCESSOR_H */
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index d5f08ea566ed..ce20a53afe91 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -235,6 +235,7 @@ typedef struct
235#define PSW_MASK_ASC 0x0000C000UL 235#define PSW_MASK_ASC 0x0000C000UL
236#define PSW_MASK_CC 0x00003000UL 236#define PSW_MASK_CC 0x00003000UL
237#define PSW_MASK_PM 0x00000F00UL 237#define PSW_MASK_PM 0x00000F00UL
238#define PSW_MASK_RI 0x00000000UL
238#define PSW_MASK_EA 0x00000000UL 239#define PSW_MASK_EA 0x00000000UL
239#define PSW_MASK_BA 0x00000000UL 240#define PSW_MASK_BA 0x00000000UL
240 241
@@ -264,10 +265,11 @@ typedef struct
264#define PSW_MASK_ASC 0x0000C00000000000UL 265#define PSW_MASK_ASC 0x0000C00000000000UL
265#define PSW_MASK_CC 0x0000300000000000UL 266#define PSW_MASK_CC 0x0000300000000000UL
266#define PSW_MASK_PM 0x00000F0000000000UL 267#define PSW_MASK_PM 0x00000F0000000000UL
268#define PSW_MASK_RI 0x0000008000000000UL
267#define PSW_MASK_EA 0x0000000100000000UL 269#define PSW_MASK_EA 0x0000000100000000UL
268#define PSW_MASK_BA 0x0000000080000000UL 270#define PSW_MASK_BA 0x0000000080000000UL
269 271
270#define PSW_MASK_USER 0x00003F0180000000UL 272#define PSW_MASK_USER 0x00003F8180000000UL
271 273
272#define PSW_ADDR_AMODE 0x0000000000000000UL 274#define PSW_ADDR_AMODE 0x0000000000000000UL
273#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL 275#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL
@@ -359,17 +361,19 @@ struct per_struct_kernel {
359 unsigned char access_id; /* PER trap access identification */ 361 unsigned char access_id; /* PER trap access identification */
360}; 362};
361 363
362#define PER_EVENT_MASK 0xE9000000UL 364#define PER_EVENT_MASK 0xEB000000UL
363 365
364#define PER_EVENT_BRANCH 0x80000000UL 366#define PER_EVENT_BRANCH 0x80000000UL
365#define PER_EVENT_IFETCH 0x40000000UL 367#define PER_EVENT_IFETCH 0x40000000UL
366#define PER_EVENT_STORE 0x20000000UL 368#define PER_EVENT_STORE 0x20000000UL
367#define PER_EVENT_STORE_REAL 0x08000000UL 369#define PER_EVENT_STORE_REAL 0x08000000UL
370#define PER_EVENT_TRANSACTION_END 0x02000000UL
368#define PER_EVENT_NULLIFICATION 0x01000000UL 371#define PER_EVENT_NULLIFICATION 0x01000000UL
369 372
370#define PER_CONTROL_MASK 0x00a00000UL 373#define PER_CONTROL_MASK 0x00e00000UL
371 374
372#define PER_CONTROL_BRANCH_ADDRESS 0x00800000UL 375#define PER_CONTROL_BRANCH_ADDRESS 0x00800000UL
376#define PER_CONTROL_SUSPENSION 0x00400000UL
373#define PER_CONTROL_ALTERATION 0x00200000UL 377#define PER_CONTROL_ALTERATION 0x00200000UL
374 378
375#endif 379#endif
@@ -483,6 +487,8 @@ typedef struct
483#define PTRACE_GET_LAST_BREAK 0x5006 487#define PTRACE_GET_LAST_BREAK 0x5006
484#define PTRACE_PEEK_SYSTEM_CALL 0x5007 488#define PTRACE_PEEK_SYSTEM_CALL 0x5007
485#define PTRACE_POKE_SYSTEM_CALL 0x5008 489#define PTRACE_POKE_SYSTEM_CALL 0x5008
490#define PTRACE_ENABLE_TE 0x5009
491#define PTRACE_DISABLE_TE 0x5010
486 492
487/* 493/*
488 * PT_PROT definition is loosely based on hppa bsd definition in 494 * PT_PROT definition is loosely based on hppa bsd definition in
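Reviewer note: PTRACE_ENABLE_TE and PTRACE_DISABLE_TE are ordinary ptrace requests, so a tracer can toggle transactional execution for a stopped tracee by number. A hedged userspace sketch; the request values are copied from the hunk above, while the attach/stop handling around the call is assumed to be done by the caller.

#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>

#ifndef PTRACE_ENABLE_TE
#define PTRACE_ENABLE_TE	0x5009	/* values from the hunk above */
#define PTRACE_DISABLE_TE	0x5010
#endif

/* pid must already be ptrace-attached and stopped by the caller */
static int set_te(pid_t pid, int enable)
{
	int req = enable ? PTRACE_ENABLE_TE : PTRACE_DISABLE_TE;

	if (ptrace(req, pid, 0L, 0L) == -1) {
		perror(enable ? "PTRACE_ENABLE_TE" : "PTRACE_DISABLE_TE");
		return -1;
	}
	return 0;
}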
diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h
new file mode 100644
index 000000000000..830da737ff85
--- /dev/null
+++ b/arch/s390/include/asm/runtime_instr.h
@@ -0,0 +1,98 @@
1#ifndef _RUNTIME_INSTR_H
2#define _RUNTIME_INSTR_H
3
4#define S390_RUNTIME_INSTR_START 0x1
5#define S390_RUNTIME_INSTR_STOP 0x2
6
7struct runtime_instr_cb {
8 __u64 buf_current;
9 __u64 buf_origin;
10 __u64 buf_limit;
11
12 __u32 valid : 1;
13 __u32 pstate : 1;
14 __u32 pstate_set_buf : 1;
15 __u32 home_space : 1;
16 __u32 altered : 1;
17 __u32 : 3;
18 __u32 pstate_sample : 1;
19 __u32 sstate_sample : 1;
20 __u32 pstate_collect : 1;
21 __u32 sstate_collect : 1;
22 __u32 : 1;
23 __u32 halted_int : 1;
24 __u32 int_requested : 1;
25 __u32 buffer_full_int : 1;
26 __u32 key : 4;
27 __u32 : 9;
28 __u32 rgs : 3;
29
30 __u32 mode : 4;
31 __u32 next : 1;
32 __u32 mae : 1;
33 __u32 : 2;
34 __u32 call_type_br : 1;
35 __u32 return_type_br : 1;
36 __u32 other_type_br : 1;
37 __u32 bc_other_type : 1;
38 __u32 emit : 1;
39 __u32 tx_abort : 1;
40 __u32 : 2;
41 __u32 bp_xn : 1;
42 __u32 bp_xt : 1;
43 __u32 bp_ti : 1;
44 __u32 bp_ni : 1;
45 __u32 suppr_y : 1;
46 __u32 suppr_z : 1;
47
48 __u32 dc_miss_extra : 1;
49 __u32 lat_lev_ignore : 1;
50 __u32 ic_lat_lev : 4;
51 __u32 dc_lat_lev : 4;
52
53 __u64 reserved1;
54 __u64 scaling_factor;
55 __u64 rsic;
56 __u64 reserved2;
57} __packed __aligned(8);
58
59extern struct runtime_instr_cb runtime_instr_empty_cb;
60
61static inline void load_runtime_instr_cb(struct runtime_instr_cb *cb)
62{
63 asm volatile(".insn rsy,0xeb0000000060,0,0,%0" /* LRIC */
64 : : "Q" (*cb));
65}
66
67static inline void store_runtime_instr_cb(struct runtime_instr_cb *cb)
68{
69 asm volatile(".insn rsy,0xeb0000000061,0,0,%0" /* STRIC */
70 : "=Q" (*cb) : : "cc");
71}
72
73static inline void save_ri_cb(struct runtime_instr_cb *cb_prev)
74{
75#ifdef CONFIG_64BIT
76 if (cb_prev)
77 store_runtime_instr_cb(cb_prev);
78#endif
79}
80
81static inline void restore_ri_cb(struct runtime_instr_cb *cb_next,
82 struct runtime_instr_cb *cb_prev)
83{
84#ifdef CONFIG_64BIT
85 if (cb_next)
86 load_runtime_instr_cb(cb_next);
87 else if (cb_prev)
88 load_runtime_instr_cb(&runtime_instr_empty_cb);
89#endif
90}
91
92#ifdef CONFIG_64BIT
93extern void exit_thread_runtime_instr(void);
94#else
95static inline void exit_thread_runtime_instr(void) { }
96#endif
97
98#endif /* _RUNTIME_INSTR_H */
diff --git a/arch/s390/include/asm/scsw.h b/arch/s390/include/asm/scsw.h
index 4071d00978cb..4af99cdaddf5 100644
--- a/arch/s390/include/asm/scsw.h
+++ b/arch/s390/include/asm/scsw.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Helper functions for scsw access. 2 * Helper functions for scsw access.
3 * 3 *
4 * Copyright IBM Corp. 2008, 2009 4 * Copyright IBM Corp. 2008, 2012
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */ 6 */
7 7
@@ -9,7 +9,7 @@
9#define _ASM_S390_SCSW_H_ 9#define _ASM_S390_SCSW_H_
10 10
11#include <linux/types.h> 11#include <linux/types.h>
12#include <asm/chsc.h> 12#include <asm/css_chars.h>
13#include <asm/cio.h> 13#include <asm/cio.h>
14 14
15/** 15/**
@@ -100,14 +100,46 @@ struct tm_scsw {
100} __attribute__ ((packed)); 100} __attribute__ ((packed));
101 101
102/** 102/**
103 * struct eadm_scsw - subchannel status word for eadm subchannels
104 * @key: subchannel key
105 * @eswf: esw format
106 * @cc: deferred condition code
107 * @ectl: extended control
108 * @fctl: function control
109 * @actl: activity control
110 * @stctl: status control
111 * @aob: AOB address
112 * @dstat: device status
113 * @cstat: subchannel status
114 */
115struct eadm_scsw {
116 u32 key:4;
117 u32:1;
118 u32 eswf:1;
119 u32 cc:2;
120 u32:6;
121 u32 ectl:1;
122 u32:2;
123 u32 fctl:3;
124 u32 actl:7;
125 u32 stctl:5;
126 u32 aob;
127 u32 dstat:8;
128 u32 cstat:8;
129 u32:16;
130} __packed;
131
132/**
103 * union scsw - subchannel status word 133 * union scsw - subchannel status word
104 * @cmd: command-mode SCSW 134 * @cmd: command-mode SCSW
105 * @tm: transport-mode SCSW 135 * @tm: transport-mode SCSW
136 * @eadm: eadm SCSW
106 */ 137 */
107union scsw { 138union scsw {
108 struct cmd_scsw cmd; 139 struct cmd_scsw cmd;
109 struct tm_scsw tm; 140 struct tm_scsw tm;
110} __attribute__ ((packed)); 141 struct eadm_scsw eadm;
142} __packed;
111 143
112#define SCSW_FCTL_CLEAR_FUNC 0x1 144#define SCSW_FCTL_CLEAR_FUNC 0x1
113#define SCSW_FCTL_HALT_FUNC 0x2 145#define SCSW_FCTL_HALT_FUNC 0x2
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index e6859d16ee2d..87b47ca954f1 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -60,7 +60,7 @@ void create_mem_hole(struct mem_chunk memory_chunk[], unsigned long addr,
60#define SECONDARY_SPACE_MODE 2 60#define SECONDARY_SPACE_MODE 2
61#define HOME_SPACE_MODE 3 61#define HOME_SPACE_MODE 3
62 62
63extern unsigned int addressing_mode; 63extern unsigned int s390_user_mode;
64 64
65/* 65/*
66 * Machine features detected in head.S 66 * Machine features detected in head.S
@@ -80,6 +80,7 @@ extern unsigned int addressing_mode;
80#define MACHINE_FLAG_LPAR (1UL << 12) 80#define MACHINE_FLAG_LPAR (1UL << 12)
81#define MACHINE_FLAG_SPP (1UL << 13) 81#define MACHINE_FLAG_SPP (1UL << 13)
82#define MACHINE_FLAG_TOPOLOGY (1UL << 14) 82#define MACHINE_FLAG_TOPOLOGY (1UL << 14)
83#define MACHINE_FLAG_TE (1UL << 15)
83 84
84#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) 85#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
85#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) 86#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -98,6 +99,7 @@ extern unsigned int addressing_mode;
98#define MACHINE_HAS_PFMF (0) 99#define MACHINE_HAS_PFMF (0)
99#define MACHINE_HAS_SPP (0) 100#define MACHINE_HAS_SPP (0)
100#define MACHINE_HAS_TOPOLOGY (0) 101#define MACHINE_HAS_TOPOLOGY (0)
102#define MACHINE_HAS_TE (0)
101#else /* CONFIG_64BIT */ 103#else /* CONFIG_64BIT */
102#define MACHINE_HAS_IEEE (1) 104#define MACHINE_HAS_IEEE (1)
103#define MACHINE_HAS_CSP (1) 105#define MACHINE_HAS_CSP (1)
@@ -109,6 +111,7 @@ extern unsigned int addressing_mode;
109#define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF) 111#define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
110#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP) 112#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
111#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY) 113#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
114#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
112#endif /* CONFIG_64BIT */ 115#endif /* CONFIG_64BIT */
113 116
114#define ZFCPDUMP_HSA_SIZE (32UL<<20) 117#define ZFCPDUMP_HSA_SIZE (32UL<<20)
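Reviewer note: MACHINE_HAS_TE evaluates to the constant 0 on 31-bit builds, so callers can gate transactional-execution handling without extra #ifdefs. A minimal hedged sketch; the function is illustrative and not from this series.

/* Illustrative only: on !CONFIG_64BIT the compiler removes the body
 * entirely because MACHINE_HAS_TE is the constant 0 there. */
static int __init te_setup(void)
{
	if (!MACHINE_HAS_TE)
		return 0;
	/* ... TE-specific initialization would go here ... */
	return 0;
}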
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index ce26ac3cb162..b64f15c3b4cc 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -30,6 +30,8 @@ extern int smp_vcpu_scheduled(int cpu);
30extern void smp_yield_cpu(int cpu); 30extern void smp_yield_cpu(int cpu);
31extern void smp_yield(void); 31extern void smp_yield(void);
32extern void smp_stop_cpu(void); 32extern void smp_stop_cpu(void);
33extern void smp_cpu_set_polarization(int cpu, int val);
34extern int smp_cpu_get_polarization(int cpu);
33 35
34#else /* CONFIG_SMP */ 36#else /* CONFIG_SMP */
35 37
@@ -43,7 +45,7 @@ static inline void smp_call_online_cpu(void (*func)(void *), void *data)
43 func(data); 45 func(data);
44} 46}
45 47
46static inline int smp_find_processor_id(int address) { return 0; } 48static inline int smp_find_processor_id(u16 address) { return 0; }
47static inline int smp_store_status(int cpu) { return 0; } 49static inline int smp_store_status(int cpu) { return 0; }
48static inline int smp_vcpu_scheduled(int cpu) { return 1; } 50static inline int smp_vcpu_scheduled(int cpu) { return 1; }
49static inline void smp_yield_cpu(int cpu) { } 51static inline void smp_yield_cpu(int cpu) { }
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 1bd1352fa3b5..7e2dcd7c57ef 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -96,7 +96,6 @@ static inline char *strcat(char *dst, const char *src)
96 96
97static inline char *strcpy(char *dst, const char *src) 97static inline char *strcpy(char *dst, const char *src)
98{ 98{
99#if __GNUC__ < 4
100 register int r0 asm("0") = 0; 99 register int r0 asm("0") = 0;
101 char *ret = dst; 100 char *ret = dst;
102 101
@@ -106,14 +105,10 @@ static inline char *strcpy(char *dst, const char *src)
106 : "+&a" (dst), "+&a" (src) : "d" (r0) 105 : "+&a" (dst), "+&a" (src) : "d" (r0)
107 : "cc", "memory"); 106 : "cc", "memory");
108 return ret; 107 return ret;
109#else
110 return __builtin_strcpy(dst, src);
111#endif
112} 108}
113 109
114static inline size_t strlen(const char *s) 110static inline size_t strlen(const char *s)
115{ 111{
116#if __GNUC__ < 4
117 register unsigned long r0 asm("0") = 0; 112 register unsigned long r0 asm("0") = 0;
118 const char *tmp = s; 113 const char *tmp = s;
119 114
@@ -122,9 +117,6 @@ static inline size_t strlen(const char *s)
122 " jo 0b" 117 " jo 0b"
123 : "+d" (r0), "+a" (tmp) : : "cc"); 118 : "+d" (r0), "+a" (tmp) : : "cc");
124 return r0 - (unsigned long) s; 119 return r0 - (unsigned long) s;
125#else
126 return __builtin_strlen(s);
127#endif
128} 120}
129 121
130static inline size_t strnlen(const char * s, size_t n) 122static inline size_t strnlen(const char * s, size_t n)
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index f223068b7822..f3a9e0f92704 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -80,21 +80,19 @@ static inline void restore_access_regs(unsigned int *acrs)
80 if (prev->mm) { \ 80 if (prev->mm) { \
81 save_fp_regs(&prev->thread.fp_regs); \ 81 save_fp_regs(&prev->thread.fp_regs); \
82 save_access_regs(&prev->thread.acrs[0]); \ 82 save_access_regs(&prev->thread.acrs[0]); \
83 save_ri_cb(prev->thread.ri_cb); \
83 } \ 84 } \
84 if (next->mm) { \ 85 if (next->mm) { \
85 restore_fp_regs(&next->thread.fp_regs); \ 86 restore_fp_regs(&next->thread.fp_regs); \
86 restore_access_regs(&next->thread.acrs[0]); \ 87 restore_access_regs(&next->thread.acrs[0]); \
88 restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
87 update_per_regs(next); \ 89 update_per_regs(next); \
88 } \ 90 } \
89 prev = __switch_to(prev,next); \ 91 prev = __switch_to(prev,next); \
90} while (0) 92} while (0)
91 93
92extern void account_vtime(struct task_struct *, struct task_struct *);
93extern void account_tick_vtime(struct task_struct *);
94
95#define finish_arch_switch(prev) do { \ 94#define finish_arch_switch(prev) do { \
96 set_fs(current->thread.mm_segment); \ 95 set_fs(current->thread.mm_segment); \
97 account_vtime(prev, current); \
98} while (0) 96} while (0)
99 97
100#endif /* __ASM_SWITCH_TO_H */ 98#endif /* __ASM_SWITCH_TO_H */
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 282ee36f6162..f92428e459f8 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -17,7 +17,10 @@
17#include <asm/bitsperlong.h> 17#include <asm/bitsperlong.h>
18 18
19struct sysinfo_1_1_1 { 19struct sysinfo_1_1_1 {
20 unsigned short :16; 20 unsigned char p:1;
21 unsigned char :6;
22 unsigned char t:1;
23 unsigned char :8;
21 unsigned char ccr; 24 unsigned char ccr;
22 unsigned char cai; 25 unsigned char cai;
23 char reserved_0[28]; 26 char reserved_0[28];
@@ -30,9 +33,14 @@ struct sysinfo_1_1_1 {
30 char model[16]; 33 char model[16];
31 char model_perm_cap[16]; 34 char model_perm_cap[16];
32 char model_temp_cap[16]; 35 char model_temp_cap[16];
33 char model_cap_rating[4]; 36 unsigned int model_cap_rating;
34 char model_perm_cap_rating[4]; 37 unsigned int model_perm_cap_rating;
35 char model_temp_cap_rating[4]; 38 unsigned int model_temp_cap_rating;
39 unsigned char typepct[5];
40 unsigned char reserved_2[3];
41 unsigned int ncr;
42 unsigned int npr;
43 unsigned int ntr;
36}; 44};
37 45
38struct sysinfo_1_2_1 { 46struct sysinfo_1_2_1 {
@@ -47,8 +55,9 @@ struct sysinfo_1_2_2 {
47 char format; 55 char format;
48 char reserved_0[1]; 56 char reserved_0[1];
49 unsigned short acc_offset; 57 unsigned short acc_offset;
50 char reserved_1[24]; 58 char reserved_1[20];
51 unsigned int secondary_capability; 59 unsigned int nominal_cap;
60 unsigned int secondary_cap;
52 unsigned int capability; 61 unsigned int capability;
53 unsigned short cpus_total; 62 unsigned short cpus_total;
54 unsigned short cpus_configured; 63 unsigned short cpus_configured;
@@ -109,6 +118,8 @@ struct sysinfo_3_2_2 {
109 char reserved_544[3552]; 118 char reserved_544[3552];
110}; 119};
111 120
121extern int topology_max_mnest;
122
112#define TOPOLOGY_CPU_BITS 64 123#define TOPOLOGY_CPU_BITS 64
113#define TOPOLOGY_NR_MAG 6 124#define TOPOLOGY_NR_MAG 6
114 125
@@ -142,21 +153,7 @@ struct sysinfo_15_1_x {
142 union topology_entry tle[0]; 153 union topology_entry tle[0];
143}; 154};
144 155
145static inline int stsi(void *sysinfo, int fc, int sel1, int sel2) 156int stsi(void *sysinfo, int fc, int sel1, int sel2);
146{
147 register int r0 asm("0") = (fc << 28) | sel1;
148 register int r1 asm("1") = sel2;
149
150 asm volatile(
151 " stsi 0(%2)\n"
152 "0: jz 2f\n"
153 "1: lhi %0,%3\n"
154 "2:\n"
155 EX_TABLE(0b, 1b)
156 : "+d" (r0) : "d" (r1), "a" (sysinfo), "K" (-ENOSYS)
157 : "cc", "memory");
158 return r0;
159}
160 157
161/* 158/*
162 * Service level reporting interface. 159 * Service level reporting interface.
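Reviewer note: with stsi() now out of line, the calling convention changes: stsi(NULL, 0, 0, 0) returns the current configuration level, while other calls return 0 on success (see the lgr.c and early.c hunks below). A hedged caller sketch; the function name and error codes are chosen for illustration, and it assumes <linux/seq_file.h>, <linux/gfp.h> and <asm/sysinfo.h>.

static int report_capability(struct seq_file *m)
{
	struct sysinfo_1_2_2 *info = (void *) get_zeroed_page(GFP_KERNEL);
	int rc = 0;

	if (!info)
		return -ENOMEM;
	/* fc=0 returns the configuration level; 1.2.2 needs at least level 1 */
	if (stsi(NULL, 0, 0, 0) < 1 || stsi(info, 1, 2, 2)) {
		rc = -EOPNOTSUPP;
		goto out;
	}
	seq_printf(m, "capability: %u\n", info->capability);
out:
	free_page((unsigned long) info);
	return rc;
}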
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 0837de80c351..9ca305383760 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -2,8 +2,8 @@
2#define _ASM_S390_TOPOLOGY_H 2#define _ASM_S390_TOPOLOGY_H
3 3
4#include <linux/cpumask.h> 4#include <linux/cpumask.h>
5#include <asm/sysinfo.h>
6 5
6struct sysinfo_15_1_x;
7struct cpu; 7struct cpu;
8 8
9#ifdef CONFIG_SCHED_BOOK 9#ifdef CONFIG_SCHED_BOOK
@@ -51,24 +51,6 @@ static inline void topology_expect_change(void) { }
51#define POLARIZATION_VM (2) 51#define POLARIZATION_VM (2)
52#define POLARIZATION_VH (3) 52#define POLARIZATION_VH (3)
53 53
54extern int cpu_polarization[];
55
56static inline void cpu_set_polarization(int cpu, int val)
57{
58#ifdef CONFIG_SCHED_BOOK
59 cpu_polarization[cpu] = val;
60#endif
61}
62
63static inline int cpu_read_polarization(int cpu)
64{
65#ifdef CONFIG_SCHED_BOOK
66 return cpu_polarization[cpu];
67#else
68 return POLARIZATION_HRZ;
69#endif
70}
71
72#ifdef CONFIG_SCHED_BOOK 54#ifdef CONFIG_SCHED_BOOK
73void s390_init_cpu_topology(void); 55void s390_init_cpu_topology(void);
74#else 56#else
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index a8ab18b18b54..34268df959a3 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -76,9 +76,22 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
76 76
77struct exception_table_entry 77struct exception_table_entry
78{ 78{
79 unsigned long insn, fixup; 79 int insn, fixup;
80}; 80};
81 81
82static inline unsigned long extable_insn(const struct exception_table_entry *x)
83{
84 return (unsigned long)&x->insn + x->insn;
85}
86
87static inline unsigned long extable_fixup(const struct exception_table_entry *x)
88{
89 return (unsigned long)&x->fixup + x->fixup;
90}
91
92#define ARCH_HAS_SORT_EXTABLE
93#define ARCH_HAS_SEARCH_EXTABLE
94
82struct uaccess_ops { 95struct uaccess_ops {
83 size_t (*copy_from_user)(size_t, const void __user *, void *); 96 size_t (*copy_from_user)(size_t, const void __user *, void *);
84 size_t (*copy_from_user_small)(size_t, const void __user *, void *); 97 size_t (*copy_from_user_small)(size_t, const void __user *, void *);
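Reviewer note: exception_table_entry shrinks from two unsigned longs to two ints holding self-relative offsets, and extable_insn()/extable_fixup() add the entry's own address back in. A self-contained userspace illustration of that arithmetic (not kernel code; the objects are kept static so the offsets fit in an int).

#include <stdio.h>
#include <stdint.h>

struct exception_table_entry {
	int insn, fixup;		/* self-relative, as in the hunk above */
};

static unsigned long extable_insn(const struct exception_table_entry *x)
{
	return (unsigned long)&x->insn + x->insn;
}

static unsigned long extable_fixup(const struct exception_table_entry *x)
{
	return (unsigned long)&x->fixup + x->fixup;
}

static char fault_site, fixup_site;	/* stand-ins for the code labels */
static struct exception_table_entry e;

int main(void)
{
	/* what ".long (_fault) - ." and ".long (_target) - ." compute */
	e.insn  = (int)((intptr_t)&fault_site - (intptr_t)&e.insn);
	e.fixup = (int)((intptr_t)&fixup_site - (intptr_t)&e.fixup);

	printf("fault at %p, resolved to %#lx\n", (void *)&fault_site, extable_insn(&e));
	printf("fixup at %p, resolved to %#lx\n", (void *)&fixup_site, extable_fixup(&e));
	return 0;
}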
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 6756e78f4808..4e64b5cd1558 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -277,7 +277,9 @@
277#define __NR_setns 339 277#define __NR_setns 339
278#define __NR_process_vm_readv 340 278#define __NR_process_vm_readv 340
279#define __NR_process_vm_writev 341 279#define __NR_process_vm_writev 341
280#define NR_syscalls 342 280#define __NR_s390_runtime_instr 342
281#define __NR_kcmp 343
282#define NR_syscalls 344
281 283
282/* 284/*
283 * There are some system calls that are not present on 64 bit, some 285 * There are some system calls that are not present on 64 bit, some
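Reviewer note: __NR_s390_runtime_instr (342) pairs with the S390_RUNTIME_INSTR_START/STOP commands from the new runtime_instr.h. A hedged userspace sketch of invoking it by number; the meaning of the second argument (here a real-time signal, presumably for buffer-full notification) is an assumption based on the two-int compat wrapper, not something this diff spells out.

#define _GNU_SOURCE
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_s390_runtime_instr
#define __NR_s390_runtime_instr	342	/* from the hunk above */
#endif
#define S390_RUNTIME_INSTR_START	0x1
#define S390_RUNTIME_INSTR_STOP		0x2

int main(void)
{
	/* second argument is an assumption; see note above */
	if (syscall(__NR_s390_runtime_instr, S390_RUNTIME_INSTR_START, SIGRTMIN) < 0) {
		perror("s390_runtime_instr start");
		return 1;
	}
	/* ... run the code to be instrumented ... */
	if (syscall(__NR_s390_runtime_instr, S390_RUNTIME_INSTR_STOP, 0) < 0)
		perror("s390_runtime_instr stop");
	return 0;
}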
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
new file mode 100644
index 000000000000..baebb3da1d44
--- /dev/null
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -0,0 +1,3 @@
1# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm
3
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 9733b3f0eb6d..4da52fe31743 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -23,10 +23,11 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
23obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \ 23obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
24 processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \ 24 processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \
25 debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \ 25 debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \
26 sysinfo.o jump_label.o lgr.o os_info.o 26 sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o
27 27
28obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 28obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
29obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) 29obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
30obj-y += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
30 31
31extra-y += head.o vmlinux.lds 32extra-y += head.o vmlinux.lds
32extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o) 33extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o)
@@ -48,12 +49,11 @@ obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
48obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o 49obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
49obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o 50obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
50obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 51obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
51obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o
52 52
53# Kexec part 53ifdef CONFIG_64BIT
54S390_KEXEC_OBJS := machine_kexec.o crash.o 54obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o
55S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o) 55obj-y += runtime_instr.o cache.o
56obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS) 56endif
57 57
58# vdso 58# vdso
59obj-$(CONFIG_64BIT) += vdso64/ 59obj-$(CONFIG_64BIT) += vdso64/
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 45ef1a7b08f9..fface87056eb 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -157,6 +157,8 @@ int main(void)
157 DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr)); 157 DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
158 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); 158 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
159 DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); 159 DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
160 DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
161 DEFINE(__THREAD_trap_tdb, offsetof(struct task_struct, thread.trap_tdb));
160 DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce)); 162 DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
161#endif /* CONFIG_32BIT */ 163#endif /* CONFIG_32BIT */
162 return 0; 164 return 0;
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
new file mode 100644
index 000000000000..8df8d8a19c98
--- /dev/null
+++ b/arch/s390/kernel/cache.c
@@ -0,0 +1,385 @@
1/*
2 * Extract CPU cache information and expose them via sysfs.
3 *
4 * Copyright IBM Corp. 2012
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */
7
8#include <linux/notifier.h>
9#include <linux/seq_file.h>
10#include <linux/init.h>
11#include <linux/list.h>
12#include <linux/slab.h>
13#include <linux/cpu.h>
14#include <asm/facility.h>
15
16struct cache {
17 unsigned long size;
18 unsigned int line_size;
19 unsigned int associativity;
20 unsigned int nr_sets;
21 unsigned int level : 3;
22 unsigned int type : 2;
23 unsigned int private : 1;
24 struct list_head list;
25};
26
27struct cache_dir {
28 struct kobject *kobj;
29 struct cache_index_dir *index;
30};
31
32struct cache_index_dir {
33 struct kobject kobj;
34 int cpu;
35 struct cache *cache;
36 struct cache_index_dir *next;
37};
38
39enum {
40 CACHE_SCOPE_NOTEXISTS,
41 CACHE_SCOPE_PRIVATE,
42 CACHE_SCOPE_SHARED,
43 CACHE_SCOPE_RESERVED,
44};
45
46enum {
47 CACHE_TYPE_SEPARATE,
48 CACHE_TYPE_DATA,
49 CACHE_TYPE_INSTRUCTION,
50 CACHE_TYPE_UNIFIED,
51};
52
53enum {
54 EXTRACT_TOPOLOGY,
55 EXTRACT_LINE_SIZE,
56 EXTRACT_SIZE,
57 EXTRACT_ASSOCIATIVITY,
58};
59
60enum {
61 CACHE_TI_UNIFIED = 0,
62 CACHE_TI_INSTRUCTION = 0,
63 CACHE_TI_DATA,
64};
65
66struct cache_info {
67 unsigned char : 4;
68 unsigned char scope : 2;
69 unsigned char type : 2;
70};
71
72#define CACHE_MAX_LEVEL 8
73
74union cache_topology {
75 struct cache_info ci[CACHE_MAX_LEVEL];
76 unsigned long long raw;
77};
78
79static const char * const cache_type_string[] = {
80 "Data",
81 "Instruction",
82 "Unified",
83};
84
85static struct cache_dir *cache_dir_cpu[NR_CPUS];
86static LIST_HEAD(cache_list);
87
88void show_cacheinfo(struct seq_file *m)
89{
90 struct cache *cache;
91 int index = 0;
92
93 list_for_each_entry(cache, &cache_list, list) {
94 seq_printf(m, "cache%-11d: ", index);
95 seq_printf(m, "level=%d ", cache->level);
96 seq_printf(m, "type=%s ", cache_type_string[cache->type]);
97 seq_printf(m, "scope=%s ", cache->private ? "Private" : "Shared");
98 seq_printf(m, "size=%luK ", cache->size >> 10);
99 seq_printf(m, "line_size=%u ", cache->line_size);
100 seq_printf(m, "associativity=%d", cache->associativity);
101 seq_puts(m, "\n");
102 index++;
103 }
104}
105
106static inline unsigned long ecag(int ai, int li, int ti)
107{
108 unsigned long cmd, val;
109
110 cmd = ai << 4 | li << 1 | ti;
111 asm volatile(".insn rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
112 : "=d" (val) : "a" (cmd));
113 return val;
114}
115
116static int __init cache_add(int level, int private, int type)
117{
118 struct cache *cache;
119 int ti;
120
121 cache = kzalloc(sizeof(*cache), GFP_KERNEL);
122 if (!cache)
123 return -ENOMEM;
124 ti = type == CACHE_TYPE_DATA ? CACHE_TI_DATA : CACHE_TI_UNIFIED;
125 cache->size = ecag(EXTRACT_SIZE, level, ti);
126 cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
127 cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
128 cache->nr_sets = cache->size / cache->associativity;
129 cache->nr_sets /= cache->line_size;
130 cache->private = private;
131 cache->level = level + 1;
132 cache->type = type - 1;
133 list_add_tail(&cache->list, &cache_list);
134 return 0;
135}
136
137static void __init cache_build_info(void)
138{
139 struct cache *cache, *next;
140 union cache_topology ct;
141 int level, private, rc;
142
143 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
144 for (level = 0; level < CACHE_MAX_LEVEL; level++) {
145 switch (ct.ci[level].scope) {
146 case CACHE_SCOPE_NOTEXISTS:
147 case CACHE_SCOPE_RESERVED:
148 return;
149 case CACHE_SCOPE_SHARED:
150 private = 0;
151 break;
152 case CACHE_SCOPE_PRIVATE:
153 private = 1;
154 break;
155 }
156 if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
157 rc = cache_add(level, private, CACHE_TYPE_DATA);
158 rc |= cache_add(level, private, CACHE_TYPE_INSTRUCTION);
159 } else {
160 rc = cache_add(level, private, ct.ci[level].type);
161 }
162 if (rc)
163 goto error;
164 }
165 return;
166error:
167 list_for_each_entry_safe(cache, next, &cache_list, list) {
168 list_del(&cache->list);
169 kfree(cache);
170 }
171}
172
173static struct cache_dir *__cpuinit cache_create_cache_dir(int cpu)
174{
175 struct cache_dir *cache_dir;
176 struct kobject *kobj = NULL;
177 struct device *dev;
178
179 dev = get_cpu_device(cpu);
180 if (!dev)
181 goto out;
182 kobj = kobject_create_and_add("cache", &dev->kobj);
183 if (!kobj)
184 goto out;
185 cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
186 if (!cache_dir)
187 goto out;
188 cache_dir->kobj = kobj;
189 cache_dir_cpu[cpu] = cache_dir;
190 return cache_dir;
191out:
192 kobject_put(kobj);
193 return NULL;
194}
195
196static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj)
197{
198 return container_of(kobj, struct cache_index_dir, kobj);
199}
200
201static void cache_index_release(struct kobject *kobj)
202{
203 struct cache_index_dir *index;
204
205 index = kobj_to_cache_index_dir(kobj);
206 kfree(index);
207}
208
209static ssize_t cache_index_show(struct kobject *kobj,
210 struct attribute *attr, char *buf)
211{
212 struct kobj_attribute *kobj_attr;
213
214 kobj_attr = container_of(attr, struct kobj_attribute, attr);
215 return kobj_attr->show(kobj, kobj_attr, buf);
216}
217
218#define DEFINE_CACHE_ATTR(_name, _format, _value) \
219static ssize_t cache_##_name##_show(struct kobject *kobj, \
220 struct kobj_attribute *attr, \
221 char *buf) \
222{ \
223 struct cache_index_dir *index; \
224 \
225 index = kobj_to_cache_index_dir(kobj); \
226 return sprintf(buf, _format, _value); \
227} \
228static struct kobj_attribute cache_##_name##_attr = \
229 __ATTR(_name, 0444, cache_##_name##_show, NULL);
230
231DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10);
232DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size);
233DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets);
234DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity);
235DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type]);
236DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level);
237
238static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf)
239{
240 struct cache_index_dir *index;
241 int len;
242
243 index = kobj_to_cache_index_dir(kobj);
244 len = type ?
245 cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) :
246 cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu));
247 len += sprintf(&buf[len], "\n");
248 return len;
249}
250
251static ssize_t shared_cpu_map_show(struct kobject *kobj,
252 struct kobj_attribute *attr, char *buf)
253{
254 return shared_cpu_map_func(kobj, 0, buf);
255}
256static struct kobj_attribute cache_shared_cpu_map_attr =
257 __ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
258
259static ssize_t shared_cpu_list_show(struct kobject *kobj,
260 struct kobj_attribute *attr, char *buf)
261{
262 return shared_cpu_map_func(kobj, 1, buf);
263}
264static struct kobj_attribute cache_shared_cpu_list_attr =
265 __ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);
266
267static struct attribute *cache_index_default_attrs[] = {
268 &cache_type_attr.attr,
269 &cache_size_attr.attr,
270 &cache_number_of_sets_attr.attr,
271 &cache_ways_of_associativity_attr.attr,
272 &cache_level_attr.attr,
273 &cache_coherency_line_size_attr.attr,
274 &cache_shared_cpu_map_attr.attr,
275 &cache_shared_cpu_list_attr.attr,
276 NULL,
277};
278
279static const struct sysfs_ops cache_index_ops = {
280 .show = cache_index_show,
281};
282
283static struct kobj_type cache_index_type = {
284 .sysfs_ops = &cache_index_ops,
285 .release = cache_index_release,
286 .default_attrs = cache_index_default_attrs,
287};
288
289static int __cpuinit cache_create_index_dir(struct cache_dir *cache_dir,
290 struct cache *cache, int index,
291 int cpu)
292{
293 struct cache_index_dir *index_dir;
294 int rc;
295
296 index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
297 if (!index_dir)
298 return -ENOMEM;
299 index_dir->cache = cache;
300 index_dir->cpu = cpu;
301 rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
302 cache_dir->kobj, "index%d", index);
303 if (rc)
304 goto out;
305 index_dir->next = cache_dir->index;
306 cache_dir->index = index_dir;
307 return 0;
308out:
309 kfree(index_dir);
310 return rc;
311}
312
313static int __cpuinit cache_add_cpu(int cpu)
314{
315 struct cache_dir *cache_dir;
316 struct cache *cache;
317 int rc, index = 0;
318
319 if (list_empty(&cache_list))
320 return 0;
321 cache_dir = cache_create_cache_dir(cpu);
322 if (!cache_dir)
323 return -ENOMEM;
324 list_for_each_entry(cache, &cache_list, list) {
325 if (!cache->private)
326 break;
327 rc = cache_create_index_dir(cache_dir, cache, index, cpu);
328 if (rc)
329 return rc;
330 index++;
331 }
332 return 0;
333}
334
335static void __cpuinit cache_remove_cpu(int cpu)
336{
337 struct cache_index_dir *index, *next;
338 struct cache_dir *cache_dir;
339
340 cache_dir = cache_dir_cpu[cpu];
341 if (!cache_dir)
342 return;
343 index = cache_dir->index;
344 while (index) {
345 next = index->next;
346 kobject_put(&index->kobj);
347 index = next;
348 }
349 kobject_put(cache_dir->kobj);
350 kfree(cache_dir);
351 cache_dir_cpu[cpu] = NULL;
352}
353
354static int __cpuinit cache_hotplug(struct notifier_block *nfb,
355 unsigned long action, void *hcpu)
356{
357 int cpu = (long)hcpu;
358 int rc = 0;
359
360 switch (action & ~CPU_TASKS_FROZEN) {
361 case CPU_ONLINE:
362 rc = cache_add_cpu(cpu);
363 if (rc)
364 cache_remove_cpu(cpu);
365 break;
366 case CPU_DEAD:
367 cache_remove_cpu(cpu);
368 break;
369 }
370 return rc ? NOTIFY_BAD : NOTIFY_OK;
371}
372
373static int __init cache_init(void)
374{
375 int cpu;
376
377 if (!test_facility(34))
378 return 0;
379 cache_build_info();
380 for_each_online_cpu(cpu)
381 cache_add_cpu(cpu);
382 hotcpu_notifier(cache_hotplug, 0);
383 return 0;
384}
385device_initcall(cache_init);
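Reviewer note: besides the /proc/cpuinfo lines produced by show_cacheinfo(), the new code publishes per-CPU directories under /sys/devices/system/cpu/cpuN/cache/indexM/ carrying the attributes registered above (size, type, level, coherency_line_size, number_of_sets, ways_of_associativity, shared_cpu_map, shared_cpu_list); only private caches get index directories. A small hedged userspace sketch reading one of them; the path layout is inferred from get_cpu_device() plus the "cache"/"index%d" kobject names.

#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cache/index0/size", "r");

	if (!f) {
		perror("cpu0/cache/index0/size");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("cpu0 index0 size: %s", buf);	/* printed as "<size>K\n" by cache_size_show() */
	fclose(f);
	return 0;
}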
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index f606d935f495..189963c90c6e 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -131,13 +131,19 @@ asmlinkage long sys32_setresuid16(u16 ruid, u16 euid, u16 suid)
131 low2highuid(suid)); 131 low2highuid(suid));
132} 132}
133 133
134asmlinkage long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid) 134asmlinkage long sys32_getresuid16(u16 __user *ruidp, u16 __user *euidp, u16 __user *suidp)
135{ 135{
136 const struct cred *cred = current_cred();
136 int retval; 137 int retval;
138 u16 ruid, euid, suid;
137 139
138 if (!(retval = put_user(high2lowuid(current->cred->uid), ruid)) && 140 ruid = high2lowuid(from_kuid_munged(cred->user_ns, cred->uid));
139 !(retval = put_user(high2lowuid(current->cred->euid), euid))) 141 euid = high2lowuid(from_kuid_munged(cred->user_ns, cred->euid));
140 retval = put_user(high2lowuid(current->cred->suid), suid); 142 suid = high2lowuid(from_kuid_munged(cred->user_ns, cred->suid));
143
144 if (!(retval = put_user(ruid, ruidp)) &&
145 !(retval = put_user(euid, euidp)))
146 retval = put_user(suid, suidp);
141 147
142 return retval; 148 return retval;
143} 149}
@@ -148,13 +154,19 @@ asmlinkage long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid)
148 low2highgid(sgid)); 154 low2highgid(sgid));
149} 155}
150 156
151asmlinkage long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid) 157asmlinkage long sys32_getresgid16(u16 __user *rgidp, u16 __user *egidp, u16 __user *sgidp)
152{ 158{
159 const struct cred *cred = current_cred();
153 int retval; 160 int retval;
161 u16 rgid, egid, sgid;
162
163 rgid = high2lowgid(from_kgid_munged(cred->user_ns, cred->gid));
164 egid = high2lowgid(from_kgid_munged(cred->user_ns, cred->egid));
165 sgid = high2lowgid(from_kgid_munged(cred->user_ns, cred->sgid));
154 166
155 if (!(retval = put_user(high2lowgid(current->cred->gid), rgid)) && 167 if (!(retval = put_user(rgid, rgidp)) &&
156 !(retval = put_user(high2lowgid(current->cred->egid), egid))) 168 !(retval = put_user(egid, egidp)))
157 retval = put_user(high2lowgid(current->cred->sgid), sgid); 169 retval = put_user(sgid, sgidp);
158 170
159 return retval; 171 return retval;
160} 172}
@@ -258,22 +270,22 @@ asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist)
258 270
259asmlinkage long sys32_getuid16(void) 271asmlinkage long sys32_getuid16(void)
260{ 272{
261 return high2lowuid(current->cred->uid); 273 return high2lowuid(from_kuid_munged(current_user_ns(), current_uid()));
262} 274}
263 275
264asmlinkage long sys32_geteuid16(void) 276asmlinkage long sys32_geteuid16(void)
265{ 277{
266 return high2lowuid(current->cred->euid); 278 return high2lowuid(from_kuid_munged(current_user_ns(), current_euid()));
267} 279}
268 280
269asmlinkage long sys32_getgid16(void) 281asmlinkage long sys32_getgid16(void)
270{ 282{
271 return high2lowgid(current->cred->gid); 283 return high2lowgid(from_kgid_munged(current_user_ns(), current_gid()));
272} 284}
273 285
274asmlinkage long sys32_getegid16(void) 286asmlinkage long sys32_getegid16(void)
275{ 287{
276 return high2lowgid(current->cred->egid); 288 return high2lowgid(from_kgid_munged(current_user_ns(), current_egid()));
277} 289}
278 290
279/* 291/*
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 2d82cfcbce5b..3afba804fe97 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1646,3 +1646,16 @@ ENTRY(compat_sys_process_vm_writev_wrapper)
1646 llgf %r0,164(%r15) # unsigned long 1646 llgf %r0,164(%r15) # unsigned long
1647 stg %r0,160(%r15) 1647 stg %r0,160(%r15)
1648 jg compat_sys_process_vm_writev 1648 jg compat_sys_process_vm_writev
1649
1650ENTRY(sys_s390_runtime_instr_wrapper)
1651 lgfr %r2,%r2 # int
1652 lgfr %r3,%r3 # int
1653 jg sys_s390_runtime_instr
1654
1655ENTRY(sys_kcmp_wrapper)
1656 lgfr %r2,%r2 # pid_t
1657 lgfr %r3,%r3 # pid_t
1658 lgfr %r4,%r4 # int
1659 llgfr %r5,%r5 # unsigned long
1660 llgfr %r6,%r6 # unsigned long
1661 jg sys_kcmp
diff --git a/arch/s390/kernel/crash.c b/arch/s390/kernel/crash.c
deleted file mode 100644
index 3819153de8bd..000000000000
--- a/arch/s390/kernel/crash.c
+++ /dev/null
@@ -1,14 +0,0 @@
1/*
2 * Copyright IBM Corp. 2005
3 *
4 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
5 *
6 */
7
8#include <linux/threads.h>
9#include <linux/kexec.h>
10#include <linux/reboot.h>
11
12void machine_crash_shutdown(struct pt_regs *regs)
13{
14}
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index cc1172b26873..fb8d8781a011 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -13,8 +13,9 @@
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/bootmem.h> 14#include <linux/bootmem.h>
15#include <linux/elf.h> 15#include <linux/elf.h>
16#include <asm/ipl.h>
17#include <asm/os_info.h> 16#include <asm/os_info.h>
17#include <asm/elf.h>
18#include <asm/ipl.h>
18 19
19#define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y))) 20#define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
20#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y))) 21#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 619c5d350726..cc84a24c023f 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -315,6 +315,11 @@ enum {
315 LONG_INSN_POPCNT, 315 LONG_INSN_POPCNT,
316 LONG_INSN_RISBHG, 316 LONG_INSN_RISBHG,
317 LONG_INSN_RISBLG, 317 LONG_INSN_RISBLG,
318 LONG_INSN_RINEXT,
319 LONG_INSN_RIEMIT,
320 LONG_INSN_TABORT,
321 LONG_INSN_TBEGIN,
322 LONG_INSN_TBEGINC,
318}; 323};
319 324
320static char *long_insn_name[] = { 325static char *long_insn_name[] = {
@@ -329,7 +334,12 @@ static char *long_insn_name[] = {
329 [LONG_INSN_LLGHRL] = "llghrl", 334 [LONG_INSN_LLGHRL] = "llghrl",
330 [LONG_INSN_POPCNT] = "popcnt", 335 [LONG_INSN_POPCNT] = "popcnt",
331 [LONG_INSN_RISBHG] = "risbhg", 336 [LONG_INSN_RISBHG] = "risbhg",
332 [LONG_INSN_RISBLG] = "risblk", 337 [LONG_INSN_RISBLG] = "risblg",
338 [LONG_INSN_RINEXT] = "rinext",
339 [LONG_INSN_RIEMIT] = "riemit",
340 [LONG_INSN_TABORT] = "tabort",
341 [LONG_INSN_TBEGIN] = "tbegin",
342 [LONG_INSN_TBEGINC] = "tbeginc",
333}; 343};
334 344
335static struct insn opcode[] = { 345static struct insn opcode[] = {
@@ -582,6 +592,17 @@ static struct insn opcode_a7[] = {
582 { "", 0, INSTR_INVALID } 592 { "", 0, INSTR_INVALID }
583}; 593};
584 594
595static struct insn opcode_aa[] = {
596#ifdef CONFIG_64BIT
597 { { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI },
598 { "rion", 0x01, INSTR_RI_RI },
599 { "tric", 0x02, INSTR_RI_RI },
600 { "rioff", 0x03, INSTR_RI_RI },
601 { { 0, LONG_INSN_RIEMIT }, 0x04, INSTR_RI_RI },
602#endif
603 { "", 0, INSTR_INVALID }
604};
605
585static struct insn opcode_b2[] = { 606static struct insn opcode_b2[] = {
586#ifdef CONFIG_64BIT 607#ifdef CONFIG_64BIT
587 { "sske", 0x2b, INSTR_RRF_M0RR }, 608 { "sske", 0x2b, INSTR_RRF_M0RR },
@@ -594,6 +615,9 @@ static struct insn opcode_b2[] = {
594 { "lpswe", 0xb2, INSTR_S_RD }, 615 { "lpswe", 0xb2, INSTR_S_RD },
595 { "srnmt", 0xb9, INSTR_S_RD }, 616 { "srnmt", 0xb9, INSTR_S_RD },
596 { "lfas", 0xbd, INSTR_S_RD }, 617 { "lfas", 0xbd, INSTR_S_RD },
618 { "etndg", 0xec, INSTR_RRE_R0 },
619 { { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD },
620 { "tend", 0xf8, INSTR_S_RD },
597#endif 621#endif
598 { "stidp", 0x02, INSTR_S_RD }, 622 { "stidp", 0x02, INSTR_S_RD },
599 { "sck", 0x04, INSTR_S_RD }, 623 { "sck", 0x04, INSTR_S_RD },
@@ -1150,6 +1174,7 @@ static struct insn opcode_e3[] = {
1150 { "stfh", 0xcb, INSTR_RXY_RRRD }, 1174 { "stfh", 0xcb, INSTR_RXY_RRRD },
1151 { "chf", 0xcd, INSTR_RXY_RRRD }, 1175 { "chf", 0xcd, INSTR_RXY_RRRD },
1152 { "clhf", 0xcf, INSTR_RXY_RRRD }, 1176 { "clhf", 0xcf, INSTR_RXY_RRRD },
1177 { "ntstg", 0x25, INSTR_RXY_RRRD },
1153#endif 1178#endif
1154 { "lrv", 0x1e, INSTR_RXY_RRRD }, 1179 { "lrv", 0x1e, INSTR_RXY_RRRD },
1155 { "lrvh", 0x1f, INSTR_RXY_RRRD }, 1180 { "lrvh", 0x1f, INSTR_RXY_RRRD },
@@ -1173,6 +1198,8 @@ static struct insn opcode_e5[] = {
1173 { "mvhhi", 0x44, INSTR_SIL_RDI }, 1198 { "mvhhi", 0x44, INSTR_SIL_RDI },
1174 { "mvhi", 0x4c, INSTR_SIL_RDI }, 1199 { "mvhi", 0x4c, INSTR_SIL_RDI },
1175 { "mvghi", 0x48, INSTR_SIL_RDI }, 1200 { "mvghi", 0x48, INSTR_SIL_RDI },
1201 { { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU },
1202 { { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU },
1176#endif 1203#endif
1177 { "lasp", 0x00, INSTR_SSE_RDRD }, 1204 { "lasp", 0x00, INSTR_SSE_RDRD },
1178 { "tprot", 0x01, INSTR_SSE_RDRD }, 1205 { "tprot", 0x01, INSTR_SSE_RDRD },
@@ -1210,6 +1237,9 @@ static struct insn opcode_eb[] = {
1210 { "cliy", 0x55, INSTR_SIY_URD }, 1237 { "cliy", 0x55, INSTR_SIY_URD },
1211 { "oiy", 0x56, INSTR_SIY_URD }, 1238 { "oiy", 0x56, INSTR_SIY_URD },
1212 { "xiy", 0x57, INSTR_SIY_URD }, 1239 { "xiy", 0x57, INSTR_SIY_URD },
1240 { "lric", 0x60, INSTR_RSY_RDRM },
1241 { "stric", 0x61, INSTR_RSY_RDRM },
1242 { "mric", 0x62, INSTR_RSY_RDRM },
1213 { "icmh", 0x80, INSTR_RSE_RURD }, 1243 { "icmh", 0x80, INSTR_RSE_RURD },
1214 { "icmh", 0x80, INSTR_RSY_RURD }, 1244 { "icmh", 0x80, INSTR_RSY_RURD },
1215 { "icmy", 0x81, INSTR_RSY_RURD }, 1245 { "icmy", 0x81, INSTR_RSY_RURD },
@@ -1408,6 +1438,9 @@ static struct insn *find_insn(unsigned char *code)
1408 case 0xa7: 1438 case 0xa7:
1409 table = opcode_a7; 1439 table = opcode_a7;
1410 break; 1440 break;
1441 case 0xaa:
1442 table = opcode_aa;
1443 break;
1411 case 0xb2: 1444 case 0xb2:
1412 table = opcode_b2; 1445 table = opcode_b2;
1413 break; 1446 break;
@@ -1601,3 +1634,26 @@ void show_code(struct pt_regs *regs)
1601 } 1634 }
1602 printk("\n"); 1635 printk("\n");
1603} 1636}
1637
1638void print_fn_code(unsigned char *code, unsigned long len)
1639{
1640 char buffer[64], *ptr;
1641 int opsize, i;
1642
1643 while (len) {
1644 ptr = buffer;
1645 opsize = insn_length(*code);
1646 ptr += sprintf(ptr, "%p: ", code);
1647 for (i = 0; i < opsize; i++)
1648 ptr += sprintf(ptr, "%02x", code[i]);
1649 *ptr++ = '\t';
1650 if (i < 4)
1651 *ptr++ = '\t';
1652 ptr += print_insn(ptr, code, (unsigned long) code);
1653 *ptr++ = '\n';
1654 *ptr++ = 0;
1655 printk(buffer);
1656 code += opsize;
1657 len -= opsize;
1658 }
1659}
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 83c3271c442b..7f4717675c19 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -215,36 +215,54 @@ static noinline __init void init_kernel_storage_key(void)
215 PAGE_DEFAULT_KEY, 0); 215 PAGE_DEFAULT_KEY, 0);
216} 216}
217 217
218static __initdata struct sysinfo_3_2_2 vmms __aligned(PAGE_SIZE); 218static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
219 219
220static noinline __init void detect_machine_type(void) 220static noinline __init void detect_machine_type(void)
221{ 221{
222 struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;
223
222 /* Check current-configuration-level */ 224 /* Check current-configuration-level */
223 if ((stsi(NULL, 0, 0, 0) >> 28) <= 2) { 225 if (stsi(NULL, 0, 0, 0) <= 2) {
224 S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR; 226 S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
225 return; 227 return;
226 } 228 }
227 /* Get virtual-machine cpu information. */ 229 /* Get virtual-machine cpu information. */
228 if (stsi(&vmms, 3, 2, 2) == -ENOSYS || !vmms.count) 230 if (stsi(vmms, 3, 2, 2) || !vmms->count)
229 return; 231 return;
230 232
231 /* Running under KVM? If not we assume z/VM */ 233 /* Running under KVM? If not we assume z/VM */
232 if (!memcmp(vmms.vm[0].cpi, "\xd2\xe5\xd4", 3)) 234 if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
233 S390_lowcore.machine_flags |= MACHINE_FLAG_KVM; 235 S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
234 else 236 else
235 S390_lowcore.machine_flags |= MACHINE_FLAG_VM; 237 S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
236} 238}
237 239
240static __init void setup_topology(void)
241{
242#ifdef CONFIG_64BIT
243 int max_mnest;
244
245 if (!test_facility(11))
246 return;
247 S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
248 for (max_mnest = 6; max_mnest > 1; max_mnest--) {
249 if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0)
250 break;
251 }
252 topology_max_mnest = max_mnest;
253#endif
254}
255
238static void early_pgm_check_handler(void) 256static void early_pgm_check_handler(void)
239{ 257{
240 unsigned long addr;
241 const struct exception_table_entry *fixup; 258 const struct exception_table_entry *fixup;
259 unsigned long addr;
242 260
243 addr = S390_lowcore.program_old_psw.addr; 261 addr = S390_lowcore.program_old_psw.addr;
244 fixup = search_exception_tables(addr & PSW_ADDR_INSN); 262 fixup = search_exception_tables(addr & PSW_ADDR_INSN);
245 if (!fixup) 263 if (!fixup)
246 disabled_wait(0); 264 disabled_wait(0);
247 S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE; 265 S390_lowcore.program_old_psw.addr = extable_fixup(fixup)|PSW_ADDR_AMODE;
248} 266}
249 267
250static noinline __init void setup_lowcore_early(void) 268static noinline __init void setup_lowcore_early(void)
@@ -267,12 +285,10 @@ static noinline __init void setup_facility_list(void)
267 285
268static noinline __init void setup_hpage(void) 286static noinline __init void setup_hpage(void)
269{ 287{
270#ifndef CONFIG_DEBUG_PAGEALLOC
271 if (!test_facility(2) || !test_facility(8)) 288 if (!test_facility(2) || !test_facility(8))
272 return; 289 return;
273 S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE; 290 S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE;
274 __ctl_set_bit(0, 23); 291 __ctl_set_bit(0, 23);
275#endif
276} 292}
277 293
278static __init void detect_mvpg(void) 294static __init void detect_mvpg(void)
@@ -366,12 +382,12 @@ static __init void detect_machine_facilities(void)
366 S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE; 382 S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
367 if (test_facility(8)) 383 if (test_facility(8))
368 S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF; 384 S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF;
369 if (test_facility(11))
370 S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
371 if (test_facility(27)) 385 if (test_facility(27))
372 S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS; 386 S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
373 if (test_facility(40)) 387 if (test_facility(40))
374 S390_lowcore.machine_flags |= MACHINE_FLAG_SPP; 388 S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
389 if (test_facility(50) && test_facility(73))
390 S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
375#endif 391#endif
376} 392}
377 393
@@ -441,7 +457,6 @@ static void __init setup_boot_command_line(void)
441 append_to_cmdline(append_ipl_scpdata); 457 append_to_cmdline(append_ipl_scpdata);
442} 458}
443 459
444
445/* 460/*
446 * Save ipl parameters, clear bss memory, initialize storage keys 461 * Save ipl parameters, clear bss memory, initialize storage keys
447 * and create a kernel NSS at startup if the SAVESYS= parm is defined 462 * and create a kernel NSS at startup if the SAVESYS= parm is defined
@@ -468,6 +483,7 @@ void __init startup_init(void)
468 detect_diag44(); 483 detect_diag44();
469 detect_machine_facilities(); 484 detect_machine_facilities();
470 setup_hpage(); 485 setup_hpage();
486 setup_topology();
471 sclp_facilities_detect(); 487 sclp_facilities_detect();
472 detect_memory_layout(memory_chunk); 488 detect_memory_layout(memory_chunk);
473#ifdef CONFIG_DYNAMIC_FTRACE 489#ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 349b7eeb348a..7549985402f7 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -10,6 +10,7 @@
10 10
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <asm/processor.h>
13#include <asm/cache.h> 14#include <asm/cache.h>
14#include <asm/errno.h> 15#include <asm/errno.h>
15#include <asm/ptrace.h> 16#include <asm/ptrace.h>
@@ -412,6 +413,11 @@ ENTRY(pgm_check_handler)
4121: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER 4131: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
413 LAST_BREAK %r14 414 LAST_BREAK %r14
414 lg %r15,__LC_KERNEL_STACK 415 lg %r15,__LC_KERNEL_STACK
416 lg %r14,__TI_task(%r12)
417 lghi %r13,__LC_PGM_TDB
418 tm __LC_PGM_ILC+2,0x02 # check for transaction abort
419 jz 2f
420 mvc __THREAD_trap_tdb(256,%r14),0(%r13)
4152: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 4212: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
416 la %r11,STACK_FRAME_OVERHEAD(%r15) 422 la %r11,STACK_FRAME_OVERHEAD(%r15)
417 stmg %r0,%r7,__PT_R0(%r11) 423 stmg %r0,%r7,__PT_R0(%r11)
@@ -422,13 +428,12 @@ ENTRY(pgm_check_handler)
422 stg %r10,__PT_ARGS(%r11) 428 stg %r10,__PT_ARGS(%r11)
423 tm __LC_PGM_ILC+3,0x80 # check for per exception 429 tm __LC_PGM_ILC+3,0x80 # check for per exception
424 jz 0f 430 jz 0f
425 lg %r1,__TI_task(%r12)
426 tmhh %r8,0x0001 # kernel per event ? 431 tmhh %r8,0x0001 # kernel per event ?
427 jz pgm_kprobe 432 jz pgm_kprobe
428 oi __TI_flags+7(%r12),_TIF_PER_TRAP 433 oi __TI_flags+7(%r12),_TIF_PER_TRAP
429 mvc __THREAD_per_address(8,%r1),__LC_PER_ADDRESS 434 mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
430 mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE 435 mvc __THREAD_per_cause(2,%r14),__LC_PER_CAUSE
431 mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID 436 mvc __THREAD_per_paid(1,%r14),__LC_PER_PAID
4320: REENABLE_IRQS 4370: REENABLE_IRQS
433 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 438 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
434 larl %r1,pgm_check_table 439 larl %r1,pgm_check_table
@@ -1004,9 +1009,7 @@ sie_fault:
1004.Lhost_id: 1009.Lhost_id:
1005 .quad 0 1010 .quad 0
1006 1011
1007 .section __ex_table,"a" 1012 EX_TABLE(sie_loop,sie_fault)
1008 .quad sie_loop,sie_fault
1009 .previous
1010#endif 1013#endif
1011 1014
1012 .section .rodata, "a" 1015 .section .rodata, "a"
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index dd7630d8aab7..6cdc55b26d68 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -30,33 +30,35 @@ struct irq_class {
30}; 30};
31 31
32static const struct irq_class intrclass_names[] = { 32static const struct irq_class intrclass_names[] = {
33 {.name = "EXT" }, 33 [EXTERNAL_INTERRUPT] = {.name = "EXT"},
34 {.name = "I/O" }, 34 [IO_INTERRUPT] = {.name = "I/O"},
35 {.name = "CLK", .desc = "[EXT] Clock Comparator" }, 35 [EXTINT_CLK] = {.name = "CLK", .desc = "[EXT] Clock Comparator"},
36 {.name = "EXC", .desc = "[EXT] External Call" }, 36 [EXTINT_EXC] = {.name = "EXC", .desc = "[EXT] External Call"},
37 {.name = "EMS", .desc = "[EXT] Emergency Signal" }, 37 [EXTINT_EMS] = {.name = "EMS", .desc = "[EXT] Emergency Signal"},
38 {.name = "TMR", .desc = "[EXT] CPU Timer" }, 38 [EXTINT_TMR] = {.name = "TMR", .desc = "[EXT] CPU Timer"},
39 {.name = "TAL", .desc = "[EXT] Timing Alert" }, 39 [EXTINT_TLA] = {.name = "TAL", .desc = "[EXT] Timing Alert"},
40 {.name = "PFL", .desc = "[EXT] Pseudo Page Fault" }, 40 [EXTINT_PFL] = {.name = "PFL", .desc = "[EXT] Pseudo Page Fault"},
41 {.name = "DSD", .desc = "[EXT] DASD Diag" }, 41 [EXTINT_DSD] = {.name = "DSD", .desc = "[EXT] DASD Diag"},
42 {.name = "VRT", .desc = "[EXT] Virtio" }, 42 [EXTINT_VRT] = {.name = "VRT", .desc = "[EXT] Virtio"},
43 {.name = "SCP", .desc = "[EXT] Service Call" }, 43 [EXTINT_SCP] = {.name = "SCP", .desc = "[EXT] Service Call"},
44 {.name = "IUC", .desc = "[EXT] IUCV" }, 44 [EXTINT_IUC] = {.name = "IUC", .desc = "[EXT] IUCV"},
45 {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling" }, 45 [EXTINT_CMS] = {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
46 {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter" }, 46 [EXTINT_CMC] = {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
47 {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt" }, 47 [EXTINT_CMR] = {.name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
48 {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" }, 48 [IOINT_CIO] = {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
49 {.name = "DAS", .desc = "[I/O] DASD" }, 49 [IOINT_QAI] = {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
50 {.name = "C15", .desc = "[I/O] 3215" }, 50 [IOINT_DAS] = {.name = "DAS", .desc = "[I/O] DASD"},
51 {.name = "C70", .desc = "[I/O] 3270" }, 51 [IOINT_C15] = {.name = "C15", .desc = "[I/O] 3215"},
52 {.name = "TAP", .desc = "[I/O] Tape" }, 52 [IOINT_C70] = {.name = "C70", .desc = "[I/O] 3270"},
53 {.name = "VMR", .desc = "[I/O] Unit Record Devices" }, 53 [IOINT_TAP] = {.name = "TAP", .desc = "[I/O] Tape"},
54 {.name = "LCS", .desc = "[I/O] LCS" }, 54 [IOINT_VMR] = {.name = "VMR", .desc = "[I/O] Unit Record Devices"},
55 {.name = "CLW", .desc = "[I/O] CLAW" }, 55 [IOINT_LCS] = {.name = "LCS", .desc = "[I/O] LCS"},
56 {.name = "CTC", .desc = "[I/O] CTC" }, 56 [IOINT_CLW] = {.name = "CLW", .desc = "[I/O] CLAW"},
57 {.name = "APB", .desc = "[I/O] AP Bus" }, 57 [IOINT_CTC] = {.name = "CTC", .desc = "[I/O] CTC"},
58 {.name = "CSC", .desc = "[I/O] CHSC Subchannel" }, 58 [IOINT_APB] = {.name = "APB", .desc = "[I/O] AP Bus"},
59 {.name = "NMI", .desc = "[NMI] Machine Check" }, 59 [IOINT_ADM] = {.name = "ADM", .desc = "[I/O] EADM Subchannel"},
60 [IOINT_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"},
61 [NMI_NMI] = {.name = "NMI", .desc = "[NMI] Machine Check"},
60}; 62};
61 63
62/* 64/*
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 8aa634f5944b..d1c7214e157c 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -547,7 +547,7 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
547 */ 547 */
548 entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); 548 entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
549 if (entry) { 549 if (entry) {
550 regs->psw.addr = entry->fixup | PSW_ADDR_AMODE; 550 regs->psw.addr = extable_fixup(entry) | PSW_ADDR_AMODE;
551 return 1; 551 return 1;
552 } 552 }
553 553
diff --git a/arch/s390/kernel/lgr.c b/arch/s390/kernel/lgr.c
index eca94e74d19a..6ea6d69339b5 100644
--- a/arch/s390/kernel/lgr.c
+++ b/arch/s390/kernel/lgr.c
@@ -51,16 +51,6 @@ static struct lgr_info lgr_info_cur;
51static struct debug_info *lgr_dbf; 51static struct debug_info *lgr_dbf;
52 52
53/* 53/*
54 * Return number of valid stsi levels
55 */
56static inline int stsi_0(void)
57{
58 int rc = stsi(NULL, 0, 0, 0);
59
60 return rc == -ENOSYS ? rc : (((unsigned int) rc) >> 28);
61}
62
63/*
64 * Copy buffer and then convert it to ASCII 54 * Copy buffer and then convert it to ASCII
65 */ 55 */
66static void cpascii(char *dst, char *src, int size) 56static void cpascii(char *dst, char *src, int size)
@@ -76,7 +66,7 @@ static void lgr_stsi_1_1_1(struct lgr_info *lgr_info)
76{ 66{
77 struct sysinfo_1_1_1 *si = (void *) lgr_page; 67 struct sysinfo_1_1_1 *si = (void *) lgr_page;
78 68
79 if (stsi(si, 1, 1, 1) == -ENOSYS) 69 if (stsi(si, 1, 1, 1))
80 return; 70 return;
81 cpascii(lgr_info->manufacturer, si->manufacturer, 71 cpascii(lgr_info->manufacturer, si->manufacturer,
82 sizeof(si->manufacturer)); 72 sizeof(si->manufacturer));
@@ -93,7 +83,7 @@ static void lgr_stsi_2_2_2(struct lgr_info *lgr_info)
93{ 83{
94 struct sysinfo_2_2_2 *si = (void *) lgr_page; 84 struct sysinfo_2_2_2 *si = (void *) lgr_page;
95 85
96 if (stsi(si, 2, 2, 2) == -ENOSYS) 86 if (stsi(si, 2, 2, 2))
97 return; 87 return;
98 cpascii(lgr_info->name, si->name, sizeof(si->name)); 88 cpascii(lgr_info->name, si->name, sizeof(si->name));
99 memcpy(&lgr_info->lpar_number, &si->lpar_number, 89 memcpy(&lgr_info->lpar_number, &si->lpar_number,
@@ -108,7 +98,7 @@ static void lgr_stsi_3_2_2(struct lgr_info *lgr_info)
108 struct sysinfo_3_2_2 *si = (void *) lgr_page; 98 struct sysinfo_3_2_2 *si = (void *) lgr_page;
109 int i; 99 int i;
110 100
111 if (stsi(si, 3, 2, 2) == -ENOSYS) 101 if (stsi(si, 3, 2, 2))
112 return; 102 return;
113 for (i = 0; i < min_t(u8, si->count, VM_LEVEL_MAX); i++) { 103 for (i = 0; i < min_t(u8, si->count, VM_LEVEL_MAX); i++) {
114 cpascii(lgr_info->vm[i].name, si->vm[i].name, 104 cpascii(lgr_info->vm[i].name, si->vm[i].name,
@@ -124,16 +114,17 @@ static void lgr_stsi_3_2_2(struct lgr_info *lgr_info)
124 */ 114 */
125static void lgr_info_get(struct lgr_info *lgr_info) 115static void lgr_info_get(struct lgr_info *lgr_info)
126{ 116{
117 int level;
118
127 memset(lgr_info, 0, sizeof(*lgr_info)); 119 memset(lgr_info, 0, sizeof(*lgr_info));
128 stfle(lgr_info->stfle_fac_list, ARRAY_SIZE(lgr_info->stfle_fac_list)); 120 stfle(lgr_info->stfle_fac_list, ARRAY_SIZE(lgr_info->stfle_fac_list));
129 lgr_info->level = stsi_0(); 121 level = stsi(NULL, 0, 0, 0);
130 if (lgr_info->level == -ENOSYS) 122 lgr_info->level = level;
131 return; 123 if (level >= 1)
132 if (lgr_info->level >= 1)
133 lgr_stsi_1_1_1(lgr_info); 124 lgr_stsi_1_1_1(lgr_info);
134 if (lgr_info->level >= 2) 125 if (level >= 2)
135 lgr_stsi_2_2_2(lgr_info); 126 lgr_stsi_2_2_2(lgr_info);
136 if (lgr_info->level >= 3) 127 if (level >= 3)
137 lgr_stsi_3_2_2(lgr_info); 128 lgr_stsi_3_2_2(lgr_info);
138} 129}
139 130
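
The lgr.c conversion above depends on the reworked stsi() added in the sysinfo.c hunk further down: with function code 0 the call now returns the machine's configuration level directly, and any other function code returns 0 on success or a negative error code instead of -ENOSYS. A minimal caller sketch under that convention, assuming the exported prototype and struct definitions from asm/sysinfo.h:

#include <linux/errno.h>
#include <asm/sysinfo.h>

/* Sketch: query the configuration level, then fetch basic machine data. */
static int example_query_machine(struct sysinfo_1_1_1 *info)
{
	int level = stsi(NULL, 0, 0, 0);	/* fc == 0: returns the level */

	if (level < 1)
		return -EOPNOTSUPP;
	return stsi(info, 1, 1, 1);	/* 0 on success, negative on error */
}
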
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 493304bdf1c7..b3de27700016 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -21,6 +21,7 @@
21#include <asm/reset.h> 21#include <asm/reset.h>
22#include <asm/ipl.h> 22#include <asm/ipl.h>
23#include <asm/diag.h> 23#include <asm/diag.h>
24#include <asm/elf.h>
24#include <asm/asm-offsets.h> 25#include <asm/asm-offsets.h>
25#include <asm/os_info.h> 26#include <asm/os_info.h>
26 27
@@ -31,8 +32,6 @@ extern const unsigned long long relocate_kernel_len;
31 32
32#ifdef CONFIG_CRASH_DUMP 33#ifdef CONFIG_CRASH_DUMP
33 34
34void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
35
36/* 35/*
37 * Create ELF notes for one CPU 36 * Create ELF notes for one CPU
38 */ 37 */
@@ -159,7 +158,7 @@ int machine_kexec_prepare(struct kimage *image)
159 158
160 /* Can't replace kernel image since it is read-only. */ 159 /* Can't replace kernel image since it is read-only. */
161 if (ipl_flags & IPL_NSS_VALID) 160 if (ipl_flags & IPL_NSS_VALID)
162 return -ENOSYS; 161 return -EOPNOTSUPP;
163 162
164 if (image->type == KEXEC_TYPE_CRASH) 163 if (image->type == KEXEC_TYPE_CRASH)
165 return machine_kexec_prepare_kdump(); 164 return machine_kexec_prepare_kdump();
@@ -191,6 +190,10 @@ void machine_shutdown(void)
191{ 190{
192} 191}
193 192
193void machine_crash_shutdown(struct pt_regs *regs)
194{
195}
196
194/* 197/*
195 * Do normal kexec 198 * Do normal kexec
196 */ 199 */
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 733175373a4c..5024be27df44 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -26,10 +26,12 @@
26#include <asm/io.h> 26#include <asm/io.h>
27#include <asm/processor.h> 27#include <asm/processor.h>
28#include <asm/vtimer.h> 28#include <asm/vtimer.h>
29#include <asm/exec.h>
29#include <asm/irq.h> 30#include <asm/irq.h>
30#include <asm/nmi.h> 31#include <asm/nmi.h>
31#include <asm/smp.h> 32#include <asm/smp.h>
32#include <asm/switch_to.h> 33#include <asm/switch_to.h>
34#include <asm/runtime_instr.h>
33#include "entry.h" 35#include "entry.h"
34 36
35asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); 37asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
@@ -132,6 +134,7 @@ EXPORT_SYMBOL(kernel_thread);
132 */ 134 */
133void exit_thread(void) 135void exit_thread(void)
134{ 136{
137 exit_thread_runtime_instr();
135} 138}
136 139
137void flush_thread(void) 140void flush_thread(void)
@@ -170,6 +173,11 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
170 /* Save access registers to new thread structure. */ 173 /* Save access registers to new thread structure. */
171 save_access_regs(&p->thread.acrs[0]); 174 save_access_regs(&p->thread.acrs[0]);
172 175
176 /* Don't copy runtime instrumentation info */
177 p->thread.ri_cb = NULL;
178 p->thread.ri_signum = 0;
179 frame->childregs.psw.mask &= ~PSW_MASK_RI;
180
173#ifndef CONFIG_64BIT 181#ifndef CONFIG_64BIT
174 /* 182 /*
175 * save fprs to current->thread.fp_regs to merge them with 183 * save fprs to current->thread.fp_regs to merge them with
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 572d4c9cb33b..753c41d0ffd3 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -39,9 +39,9 @@ void __cpuinit cpu_init(void)
39 */ 39 */
40static int show_cpuinfo(struct seq_file *m, void *v) 40static int show_cpuinfo(struct seq_file *m, void *v)
41{ 41{
42 static const char *hwcap_str[10] = { 42 static const char *hwcap_str[] = {
43 "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", 43 "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
44 "edat", "etf3eh", "highgprs" 44 "edat", "etf3eh", "highgprs", "te"
45 }; 45 };
46 unsigned long n = (unsigned long) v - 1; 46 unsigned long n = (unsigned long) v - 1;
47 int i; 47 int i;
@@ -54,10 +54,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
54 num_online_cpus(), loops_per_jiffy/(500000/HZ), 54 num_online_cpus(), loops_per_jiffy/(500000/HZ),
55 (loops_per_jiffy/(5000/HZ))%100); 55 (loops_per_jiffy/(5000/HZ))%100);
56 seq_puts(m, "features\t: "); 56 seq_puts(m, "features\t: ");
57 for (i = 0; i < 10; i++) 57 for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
58 if (hwcap_str[i] && (elf_hwcap & (1UL << i))) 58 if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
59 seq_printf(m, "%s ", hwcap_str[i]); 59 seq_printf(m, "%s ", hwcap_str[i]);
60 seq_puts(m, "\n"); 60 seq_puts(m, "\n");
61 show_cacheinfo(m);
61 } 62 }
62 get_online_cpus(); 63 get_online_cpus();
63 if (cpu_online(n)) { 64 if (cpu_online(n)) {
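
The table above now derives its length with ARRAY_SIZE() and gains a "te" entry, which lines up with HWCAP_S390_TE (bit 10 of elf_hwcap) set in the setup.c hunk further down. User space can test the same bit through the ELF auxiliary vector; a small sketch, assuming a libc that provides getauxval() and defining the bit locally in case the installed headers do not export it yet:

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_S390_TE
#define HWCAP_S390_TE (1UL << 10)	/* bit 10, per the setup.c hunk below */
#endif

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("transactional execution: %s\n",
	       (hwcap & HWCAP_S390_TE) ? "available" : "not available");
	return 0;
}
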
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index e4be113fbac6..a314c57f4e94 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -42,6 +42,7 @@ enum s390_regset {
42 REGSET_GENERAL, 42 REGSET_GENERAL,
43 REGSET_FP, 43 REGSET_FP,
44 REGSET_LAST_BREAK, 44 REGSET_LAST_BREAK,
45 REGSET_TDB,
45 REGSET_SYSTEM_CALL, 46 REGSET_SYSTEM_CALL,
46 REGSET_GENERAL_EXTENDED, 47 REGSET_GENERAL_EXTENDED,
47}; 48};
@@ -52,6 +53,22 @@ void update_per_regs(struct task_struct *task)
52 struct thread_struct *thread = &task->thread; 53 struct thread_struct *thread = &task->thread;
53 struct per_regs old, new; 54 struct per_regs old, new;
54 55
56#ifdef CONFIG_64BIT
57 /* Take care of the enable/disable of transactional execution. */
58 if (MACHINE_HAS_TE) {
59 unsigned long cr0, cr0_new;
60
61 __ctl_store(cr0, 0, 0);
62 /* set or clear transaction execution bits 8 and 9. */
63 if (task->thread.per_flags & PER_FLAG_NO_TE)
64 cr0_new = cr0 & ~(3UL << 54);
65 else
66 cr0_new = cr0 | (3UL << 54);
67 /* Only load control register 0 if necessary. */
68 if (cr0 != cr0_new)
69 __ctl_load(cr0_new, 0, 0);
70 }
71#endif
55 /* Copy user specified PER registers */ 72 /* Copy user specified PER registers */
56 new.control = thread->per_user.control; 73 new.control = thread->per_user.control;
57 new.start = thread->per_user.start; 74 new.start = thread->per_user.start;
@@ -60,6 +77,10 @@ void update_per_regs(struct task_struct *task)
60 /* merge TIF_SINGLE_STEP into user specified PER registers. */ 77 /* merge TIF_SINGLE_STEP into user specified PER registers. */
61 if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) { 78 if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) {
62 new.control |= PER_EVENT_IFETCH; 79 new.control |= PER_EVENT_IFETCH;
80#ifdef CONFIG_64BIT
81 new.control |= PER_CONTROL_SUSPENSION;
82 new.control |= PER_EVENT_TRANSACTION_END;
83#endif
63 new.start = 0; 84 new.start = 0;
64 new.end = PSW_ADDR_INSN; 85 new.end = PSW_ADDR_INSN;
65 } 86 }
@@ -100,6 +121,7 @@ void ptrace_disable(struct task_struct *task)
100 memset(&task->thread.per_event, 0, sizeof(task->thread.per_event)); 121 memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
101 clear_tsk_thread_flag(task, TIF_SINGLE_STEP); 122 clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
102 clear_tsk_thread_flag(task, TIF_PER_TRAP); 123 clear_tsk_thread_flag(task, TIF_PER_TRAP);
124 task->thread.per_flags = 0;
103} 125}
104 126
105#ifndef CONFIG_64BIT 127#ifndef CONFIG_64BIT
@@ -416,6 +438,16 @@ long arch_ptrace(struct task_struct *child, long request,
416 put_user(task_thread_info(child)->last_break, 438 put_user(task_thread_info(child)->last_break,
417 (unsigned long __user *) data); 439 (unsigned long __user *) data);
418 return 0; 440 return 0;
441 case PTRACE_ENABLE_TE:
442 if (!MACHINE_HAS_TE)
443 return -EIO;
444 child->thread.per_flags &= ~PER_FLAG_NO_TE;
445 return 0;
446 case PTRACE_DISABLE_TE:
447 if (!MACHINE_HAS_TE)
448 return -EIO;
449 child->thread.per_flags |= PER_FLAG_NO_TE;
450 return 0;
419 default: 451 default:
420 /* Removing high order bit from addr (only for 31 bit). */ 452 /* Removing high order bit from addr (only for 31 bit). */
421 addr &= PSW_ADDR_INSN; 453 addr &= PSW_ADDR_INSN;
@@ -903,6 +935,28 @@ static int s390_last_break_set(struct task_struct *target,
903 return 0; 935 return 0;
904} 936}
905 937
938static int s390_tdb_get(struct task_struct *target,
939 const struct user_regset *regset,
940 unsigned int pos, unsigned int count,
941 void *kbuf, void __user *ubuf)
942{
943 struct pt_regs *regs = task_pt_regs(target);
944 unsigned char *data;
945
946 if (!(regs->int_code & 0x200))
947 return -ENODATA;
948 data = target->thread.trap_tdb;
949 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
950}
951
952static int s390_tdb_set(struct task_struct *target,
953 const struct user_regset *regset,
954 unsigned int pos, unsigned int count,
955 const void *kbuf, const void __user *ubuf)
956{
957 return 0;
958}
959
906#endif 960#endif
907 961
908static int s390_system_call_get(struct task_struct *target, 962static int s390_system_call_get(struct task_struct *target,
@@ -951,6 +1005,14 @@ static const struct user_regset s390_regsets[] = {
951 .get = s390_last_break_get, 1005 .get = s390_last_break_get,
952 .set = s390_last_break_set, 1006 .set = s390_last_break_set,
953 }, 1007 },
1008 [REGSET_TDB] = {
1009 .core_note_type = NT_S390_TDB,
1010 .n = 1,
1011 .size = 256,
1012 .align = 1,
1013 .get = s390_tdb_get,
1014 .set = s390_tdb_set,
1015 },
954#endif 1016#endif
955 [REGSET_SYSTEM_CALL] = { 1017 [REGSET_SYSTEM_CALL] = {
956 .core_note_type = NT_S390_SYSTEM_CALL, 1018 .core_note_type = NT_S390_SYSTEM_CALL,
@@ -1148,6 +1210,14 @@ static const struct user_regset s390_compat_regsets[] = {
1148 .get = s390_compat_last_break_get, 1210 .get = s390_compat_last_break_get,
1149 .set = s390_compat_last_break_set, 1211 .set = s390_compat_last_break_set,
1150 }, 1212 },
1213 [REGSET_TDB] = {
1214 .core_note_type = NT_S390_TDB,
1215 .n = 1,
1216 .size = 256,
1217 .align = 1,
1218 .get = s390_tdb_get,
1219 .set = s390_tdb_set,
1220 },
1151 [REGSET_SYSTEM_CALL] = { 1221 [REGSET_SYSTEM_CALL] = {
1152 .core_note_type = NT_S390_SYSTEM_CALL, 1222 .core_note_type = NT_S390_SYSTEM_CALL,
1153 .n = 1, 1223 .n = 1,
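
The new PTRACE_ENABLE_TE/PTRACE_DISABLE_TE requests and the 256-byte NT_S390_TDB regset let a tracer switch transactional execution on or off and read the transaction diagnostic block of a stopped tracee. A hedged user-space sketch; the fallback constants below mirror what the updated uapi headers are expected to export and are assumptions, not part of this patch:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef PTRACE_DISABLE_TE
#define PTRACE_ENABLE_TE	0x5009	/* assumed value, see asm/ptrace.h */
#define PTRACE_DISABLE_TE	0x5010	/* assumed value, see asm/ptrace.h */
#endif
#ifndef NT_S390_TDB
#define NT_S390_TDB		0x308	/* assumed value, see linux/elf.h */
#endif

/* Disable TE for a stopped tracee and try to fetch its last TDB. */
int inspect_te(pid_t pid)
{
	unsigned char tdb[256];
	struct iovec iov = { .iov_base = tdb, .iov_len = sizeof(tdb) };

	if (ptrace(PTRACE_DISABLE_TE, pid, NULL, NULL) == -1) {
		perror("PTRACE_DISABLE_TE");	/* EIO if !MACHINE_HAS_TE */
		return -1;
	}
	if (ptrace(PTRACE_GETREGSET, pid,
		   (void *)(unsigned long)NT_S390_TDB, &iov) == -1) {
		perror("NT_S390_TDB");	/* ENODATA if no TDB was stored */
		return -1;
	}
	printf("read %zu bytes of transaction diagnostic block\n", iov.iov_len);
	return 0;
}
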
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
new file mode 100644
index 000000000000..61066f6f71a5
--- /dev/null
+++ b/arch/s390/kernel/runtime_instr.c
@@ -0,0 +1,150 @@
1/*
2 * Copyright IBM Corp. 2012
3 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
4 */
5
6#include <linux/kernel.h>
7#include <linux/syscalls.h>
8#include <linux/signal.h>
9#include <linux/mm.h>
10#include <linux/slab.h>
11#include <linux/init.h>
12#include <linux/errno.h>
13#include <linux/kernel_stat.h>
14#include <asm/runtime_instr.h>
15#include <asm/cpu_mf.h>
16#include <asm/irq.h>
17
18/* empty control block to disable RI by loading it */
19struct runtime_instr_cb runtime_instr_empty_cb;
20
21static int runtime_instr_avail(void)
22{
23 return test_facility(64);
24}
25
26static void disable_runtime_instr(void)
27{
28 struct pt_regs *regs = task_pt_regs(current);
29
30 load_runtime_instr_cb(&runtime_instr_empty_cb);
31
32 /*
33 * Make sure the RI bit is deleted from the PSW. If the user did not
34 * switch off RI before the system call, the process would otherwise
35 * get a specification exception.
36 */
37 regs->psw.mask &= ~PSW_MASK_RI;
38}
39
40static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
41{
42 cb->buf_limit = 0xfff;
43 if (s390_user_mode == HOME_SPACE_MODE)
44 cb->home_space = 1;
45 cb->int_requested = 1;
46 cb->pstate = 1;
47 cb->pstate_set_buf = 1;
48 cb->pstate_sample = 1;
49 cb->pstate_collect = 1;
50 cb->key = PAGE_DEFAULT_KEY;
51 cb->valid = 1;
52}
53
54void exit_thread_runtime_instr(void)
55{
56 struct task_struct *task = current;
57
58 if (!task->thread.ri_cb)
59 return;
60 disable_runtime_instr();
61 kfree(task->thread.ri_cb);
62 task->thread.ri_signum = 0;
63 task->thread.ri_cb = NULL;
64}
65
66static void runtime_instr_int_handler(struct ext_code ext_code,
67 unsigned int param32, unsigned long param64)
68{
69 struct siginfo info;
70
71 if (!(param32 & CPU_MF_INT_RI_MASK))
72 return;
73
74 kstat_cpu(smp_processor_id()).irqs[EXTINT_CMR]++;
75
76 if (!current->thread.ri_cb)
77 return;
78 if (current->thread.ri_signum < SIGRTMIN ||
79 current->thread.ri_signum > SIGRTMAX) {
80 WARN_ON_ONCE(1);
81 return;
82 }
83
84 memset(&info, 0, sizeof(info));
85 info.si_signo = current->thread.ri_signum;
86 info.si_code = SI_QUEUE;
87 if (param32 & CPU_MF_INT_RI_BUF_FULL)
88 info.si_int = ENOBUFS;
89 else if (param32 & CPU_MF_INT_RI_HALTED)
90 info.si_int = ECANCELED;
91 else
92 return; /* unknown reason */
93
94 send_sig_info(current->thread.ri_signum, &info, current);
95}
96
97SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum)
98{
99 struct runtime_instr_cb *cb;
100
101 if (!runtime_instr_avail())
102 return -EOPNOTSUPP;
103
104 if (command == S390_RUNTIME_INSTR_STOP) {
105 preempt_disable();
106 exit_thread_runtime_instr();
107 preempt_enable();
108 return 0;
109 }
110
111 if (command != S390_RUNTIME_INSTR_START ||
112 (signum < SIGRTMIN || signum > SIGRTMAX))
113 return -EINVAL;
114
115 if (!current->thread.ri_cb) {
116 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
117 if (!cb)
118 return -ENOMEM;
119 } else {
120 cb = current->thread.ri_cb;
121 memset(cb, 0, sizeof(*cb));
122 }
123
124 init_runtime_instr_cb(cb);
125 current->thread.ri_signum = signum;
126
127 /* now load the control block to make it available */
128 preempt_disable();
129 current->thread.ri_cb = cb;
130 load_runtime_instr_cb(cb);
131 preempt_enable();
132 return 0;
133}
134
135static int __init runtime_instr_init(void)
136{
137 int rc;
138
139 if (!runtime_instr_avail())
140 return 0;
141
142 measurement_alert_subclass_register();
143 rc = register_external_interrupt(0x1407, runtime_instr_int_handler);
144 if (rc)
145 measurement_alert_subclass_unregister();
146 else
147 pr_info("Runtime instrumentation facility initialized\n");
148 return rc;
149}
150device_initcall(runtime_instr_init);
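
User space reaches this facility through the s390_runtime_instr system call wired up in syscalls.S below; its slot right after process_vm_writev (341) implies syscall number 342. A hedged sketch of starting and stopping instrumentation for the calling thread; the command codes are the values expected from asm/runtime_instr.h and are defined locally only because that header may not be installed:

#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_s390_runtime_instr
#define __NR_s390_runtime_instr 342	/* slot after process_vm_writev (341) */
#endif

/* Command codes as expected from asm/runtime_instr.h (assumed values). */
#define S390_RUNTIME_INSTR_START	0x1
#define S390_RUNTIME_INSTR_STOP		0x2

int main(void)
{
	/* Buffer-full/halted conditions are reported via this RT signal. */
	if (syscall(__NR_s390_runtime_instr,
		    S390_RUNTIME_INSTR_START, SIGRTMIN) == -1) {
		perror("s390_runtime_instr");	/* EOPNOTSUPP without facility 64 */
		return 1;
	}
	/* ... run the workload to be instrumented here ... */
	syscall(__NR_s390_runtime_instr, S390_RUNTIME_INSTR_STOP, 0);
	return 0;
}
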
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index 57b536649b00..9bdbcef1da9e 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -8,3 +8,5 @@ EXPORT_SYMBOL(_mcount);
8#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) 8#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
9EXPORT_SYMBOL(sie64a); 9EXPORT_SYMBOL(sie64a);
10#endif 10#endif
11EXPORT_SYMBOL(memcpy);
12EXPORT_SYMBOL(memset);
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 40b57693de38..afa9fdba200e 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -302,10 +302,10 @@ static int __init parse_vmalloc(char *arg)
302} 302}
303early_param("vmalloc", parse_vmalloc); 303early_param("vmalloc", parse_vmalloc);
304 304
305unsigned int addressing_mode = HOME_SPACE_MODE; 305unsigned int s390_user_mode = PRIMARY_SPACE_MODE;
306EXPORT_SYMBOL_GPL(addressing_mode); 306EXPORT_SYMBOL_GPL(s390_user_mode);
307 307
308static int set_amode_primary(void) 308static void __init set_user_mode_primary(void)
309{ 309{
310 psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME; 310 psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME;
311 psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY; 311 psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY;
@@ -313,48 +313,30 @@ static int set_amode_primary(void)
313 psw32_user_bits = 313 psw32_user_bits =
314 (psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY; 314 (psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY;
315#endif 315#endif
316 316 uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos_switch : uaccess_pt;
317 if (MACHINE_HAS_MVCOS) {
318 memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
319 return 1;
320 } else {
321 memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
322 return 0;
323 }
324}
325
326/*
327 * Switch kernel/user addressing modes?
328 */
329static int __init early_parse_switch_amode(char *p)
330{
331 addressing_mode = PRIMARY_SPACE_MODE;
332 return 0;
333} 317}
334early_param("switch_amode", early_parse_switch_amode);
335 318
336static int __init early_parse_user_mode(char *p) 319static int __init early_parse_user_mode(char *p)
337{ 320{
338 if (p && strcmp(p, "primary") == 0) 321 if (p && strcmp(p, "primary") == 0)
339 addressing_mode = PRIMARY_SPACE_MODE; 322 s390_user_mode = PRIMARY_SPACE_MODE;
340 else if (!p || strcmp(p, "home") == 0) 323 else if (!p || strcmp(p, "home") == 0)
341 addressing_mode = HOME_SPACE_MODE; 324 s390_user_mode = HOME_SPACE_MODE;
342 else 325 else
343 return 1; 326 return 1;
344 return 0; 327 return 0;
345} 328}
346early_param("user_mode", early_parse_user_mode); 329early_param("user_mode", early_parse_user_mode);
347 330
348static void setup_addressing_mode(void) 331static void __init setup_addressing_mode(void)
349{ 332{
350 if (addressing_mode == PRIMARY_SPACE_MODE) { 333 if (s390_user_mode != PRIMARY_SPACE_MODE)
351 if (set_amode_primary()) 334 return;
352 pr_info("Address spaces switched, " 335 set_user_mode_primary();
353 "mvcos available\n"); 336 if (MACHINE_HAS_MVCOS)
354 else 337 pr_info("Address spaces switched, mvcos available\n");
355 pr_info("Address spaces switched, " 338 else
356 "mvcos not available\n"); 339 pr_info("Address spaces switched, mvcos not available\n");
357 }
358} 340}
359 341
360void *restart_stack __attribute__((__section__(".data"))); 342void *restart_stack __attribute__((__section__(".data")));
@@ -602,9 +584,7 @@ static void __init setup_memory_end(void)
602 584
603static void __init setup_vmcoreinfo(void) 585static void __init setup_vmcoreinfo(void)
604{ 586{
605#ifdef CONFIG_KEXEC
606 mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note()); 587 mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
607#endif
608} 588}
609 589
610#ifdef CONFIG_CRASH_DUMP 590#ifdef CONFIG_CRASH_DUMP
@@ -980,6 +960,12 @@ static void __init setup_hwcaps(void)
980 * HWCAP_S390_HIGH_GPRS is bit 9. 960 * HWCAP_S390_HIGH_GPRS is bit 9.
981 */ 961 */
982 elf_hwcap |= HWCAP_S390_HIGH_GPRS; 962 elf_hwcap |= HWCAP_S390_HIGH_GPRS;
963
964 /*
965 * Transactional execution support HWCAP_S390_TE is bit 10.
966 */
967 if (test_facility(50) && test_facility(73))
968 elf_hwcap |= HWCAP_S390_TE;
983#endif 969#endif
984 970
985 get_cpu_id(&cpu_id); 971 get_cpu_id(&cpu_id);
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 720fda1620f2..ea431e551c6b 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -66,7 +66,7 @@ struct pcpu {
66 unsigned long panic_stack; /* panic stack for the cpu */ 66 unsigned long panic_stack; /* panic stack for the cpu */
67 unsigned long ec_mask; /* bit mask for ec_xxx functions */ 67 unsigned long ec_mask; /* bit mask for ec_xxx functions */
68 int state; /* physical cpu state */ 68 int state; /* physical cpu state */
69 u32 status; /* last status received via sigp */ 69 int polarization; /* physical polarization */
70 u16 address; /* physical cpu address */ 70 u16 address; /* physical cpu address */
71}; 71};
72 72
@@ -74,6 +74,10 @@ static u8 boot_cpu_type;
74static u16 boot_cpu_address; 74static u16 boot_cpu_address;
75static struct pcpu pcpu_devices[NR_CPUS]; 75static struct pcpu pcpu_devices[NR_CPUS];
76 76
77/*
78 * The smp_cpu_state_mutex must be held when changing the state or polarization
79 * member of a pcpu data structure within the pcpu_devices array.
80 */
77DEFINE_MUTEX(smp_cpu_state_mutex); 81DEFINE_MUTEX(smp_cpu_state_mutex);
78 82
79/* 83/*
@@ -99,7 +103,7 @@ static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
99 int cc; 103 int cc;
100 104
101 while (1) { 105 while (1) {
102 cc = __pcpu_sigp(addr, order, parm, status); 106 cc = __pcpu_sigp(addr, order, parm, NULL);
103 if (cc != SIGP_CC_BUSY) 107 if (cc != SIGP_CC_BUSY)
104 return cc; 108 return cc;
105 cpu_relax(); 109 cpu_relax();
@@ -111,7 +115,7 @@ static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
111 int cc, retry; 115 int cc, retry;
112 116
113 for (retry = 0; ; retry++) { 117 for (retry = 0; ; retry++) {
114 cc = __pcpu_sigp(pcpu->address, order, parm, &pcpu->status); 118 cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
115 if (cc != SIGP_CC_BUSY) 119 if (cc != SIGP_CC_BUSY)
116 break; 120 break;
117 if (retry >= 3) 121 if (retry >= 3)
@@ -122,16 +126,18 @@ static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
122 126
123static inline int pcpu_stopped(struct pcpu *pcpu) 127static inline int pcpu_stopped(struct pcpu *pcpu)
124{ 128{
129 u32 uninitialized_var(status);
130
125 if (__pcpu_sigp(pcpu->address, SIGP_SENSE, 131 if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
126 0, &pcpu->status) != SIGP_CC_STATUS_STORED) 132 0, &status) != SIGP_CC_STATUS_STORED)
127 return 0; 133 return 0;
128 return !!(pcpu->status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED)); 134 return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
129} 135}
130 136
131static inline int pcpu_running(struct pcpu *pcpu) 137static inline int pcpu_running(struct pcpu *pcpu)
132{ 138{
133 if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING, 139 if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
134 0, &pcpu->status) != SIGP_CC_STATUS_STORED) 140 0, NULL) != SIGP_CC_STATUS_STORED)
135 return 1; 141 return 1;
136 /* Status stored condition code is equivalent to cpu not running. */ 142 /* Status stored condition code is equivalent to cpu not running. */
137 return 0; 143 return 0;
@@ -586,6 +592,16 @@ static inline void smp_get_save_area(int cpu, u16 address) { }
586 592
587#endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */ 593#endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */
588 594
595void smp_cpu_set_polarization(int cpu, int val)
596{
597 pcpu_devices[cpu].polarization = val;
598}
599
600int smp_cpu_get_polarization(int cpu)
601{
602 return pcpu_devices[cpu].polarization;
603}
604
589static struct sclp_cpu_info *smp_get_cpu_info(void) 605static struct sclp_cpu_info *smp_get_cpu_info(void)
590{ 606{
591 static int use_sigp_detection; 607 static int use_sigp_detection;
@@ -628,7 +644,7 @@ static int __devinit __smp_rescan_cpus(struct sclp_cpu_info *info,
628 pcpu->address = info->cpu[i].address; 644 pcpu->address = info->cpu[i].address;
629 pcpu->state = (cpu >= info->configured) ? 645 pcpu->state = (cpu >= info->configured) ?
630 CPU_STATE_STANDBY : CPU_STATE_CONFIGURED; 646 CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
631 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 647 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
632 set_cpu_present(cpu, true); 648 set_cpu_present(cpu, true);
633 if (sysfs_add && smp_add_present_cpu(cpu) != 0) 649 if (sysfs_add && smp_add_present_cpu(cpu) != 0)
634 set_cpu_present(cpu, false); 650 set_cpu_present(cpu, false);
@@ -796,7 +812,7 @@ void __init smp_prepare_boot_cpu(void)
796 pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE; 812 pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE;
797 pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE; 813 pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE;
798 S390_lowcore.percpu_offset = __per_cpu_offset[0]; 814 S390_lowcore.percpu_offset = __per_cpu_offset[0];
799 cpu_set_polarization(0, POLARIZATION_UNKNOWN); 815 smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
800 set_cpu_present(0, true); 816 set_cpu_present(0, true);
801 set_cpu_online(0, true); 817 set_cpu_online(0, true);
802} 818}
@@ -862,7 +878,7 @@ static ssize_t cpu_configure_store(struct device *dev,
862 if (rc) 878 if (rc)
863 break; 879 break;
864 pcpu->state = CPU_STATE_STANDBY; 880 pcpu->state = CPU_STATE_STANDBY;
865 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 881 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
866 topology_expect_change(); 882 topology_expect_change();
867 break; 883 break;
868 case 1: 884 case 1:
@@ -872,7 +888,7 @@ static ssize_t cpu_configure_store(struct device *dev,
872 if (rc) 888 if (rc)
873 break; 889 break;
874 pcpu->state = CPU_STATE_CONFIGURED; 890 pcpu->state = CPU_STATE_CONFIGURED;
875 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 891 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
876 topology_expect_change(); 892 topology_expect_change();
877 break; 893 break;
878 default: 894 default:
@@ -959,23 +975,17 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
959 struct device *s = &c->dev; 975 struct device *s = &c->dev;
960 int err = 0; 976 int err = 0;
961 977
962 switch (action) { 978 switch (action & ~CPU_TASKS_FROZEN) {
963 case CPU_ONLINE: 979 case CPU_ONLINE:
964 case CPU_ONLINE_FROZEN:
965 err = sysfs_create_group(&s->kobj, &cpu_online_attr_group); 980 err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
966 break; 981 break;
967 case CPU_DEAD: 982 case CPU_DEAD:
968 case CPU_DEAD_FROZEN:
969 sysfs_remove_group(&s->kobj, &cpu_online_attr_group); 983 sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
970 break; 984 break;
971 } 985 }
972 return notifier_from_errno(err); 986 return notifier_from_errno(err);
973} 987}
974 988
975static struct notifier_block __cpuinitdata smp_cpu_nb = {
976 .notifier_call = smp_cpu_notify,
977};
978
979static int __devinit smp_add_present_cpu(int cpu) 989static int __devinit smp_add_present_cpu(int cpu)
980{ 990{
981 struct cpu *c = &pcpu_devices[cpu].cpu; 991 struct cpu *c = &pcpu_devices[cpu].cpu;
@@ -1050,7 +1060,7 @@ static int __init s390_smp_init(void)
1050{ 1060{
1051 int cpu, rc; 1061 int cpu, rc;
1052 1062
1053 register_cpu_notifier(&smp_cpu_nb); 1063 hotcpu_notifier(smp_cpu_notify, 0);
1054#ifdef CONFIG_HOTPLUG_CPU 1064#ifdef CONFIG_HOTPLUG_CPU
1055 rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan); 1065 rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
1056 if (rc) 1066 if (rc)
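
The notifier rework at the end of this hunk is a common simplification: the *_FROZEN action values differ from their normal counterparts only in the CPU_TASKS_FROZEN bit, so masking that bit lets one case label cover both the regular and the suspend/resume path, and hotcpu_notifier() removes the hand-built notifier_block. A minimal sketch of the pattern with placeholder callbacks:

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/notifier.h>

static int __cpuinit example_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	int err = 0;

	/* Mask CPU_TASKS_FROZEN so CPU_ONLINE_FROZEN etc. share the cases. */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		/* set up per-cpu state */
		break;
	case CPU_DEAD:
		/* tear down per-cpu state */
		break;
	}
	return notifier_from_errno(err);
}

static int __init example_init(void)
{
	hotcpu_notifier(example_cpu_notify, 0);	/* builds the notifier_block */
	return 0;
}
device_initcall(example_init);
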
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index bcab2f04ba58..48174850f3b0 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -350,3 +350,5 @@ SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper)
350SYSCALL(sys_setns,sys_setns,sys_setns_wrapper) 350SYSCALL(sys_setns,sys_setns,sys_setns_wrapper)
351SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv_wrapper) /* 340 */ 351SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv_wrapper) /* 340 */
352SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev_wrapper) 352SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev_wrapper)
353SYSCALL(sys_ni_syscall,sys_s390_runtime_instr,sys_s390_runtime_instr_wrapper)
354SYSCALL(sys_kcmp,sys_kcmp,sys_kcmp_wrapper)
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index fa0eb238dac7..62f89d98e880 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -22,17 +22,41 @@
22#include <math-emu/soft-fp.h> 22#include <math-emu/soft-fp.h>
23#include <math-emu/single.h> 23#include <math-emu/single.h>
24 24
25static inline int stsi_0(void) 25int topology_max_mnest;
26
27/*
28 * stsi - store system information
29 *
30 * Returns the current configuration level if function code 0 was specified.
31 * Otherwise returns 0 on success or a negative value on error.
32 */
33int stsi(void *sysinfo, int fc, int sel1, int sel2)
26{ 34{
27 int rc = stsi(NULL, 0, 0, 0); 35 register int r0 asm("0") = (fc << 28) | sel1;
28 return rc == -ENOSYS ? rc : (((unsigned int) rc) >> 28); 36 register int r1 asm("1") = sel2;
37 int rc = 0;
38
39 asm volatile(
40 " stsi 0(%3)\n"
41 "0: jz 2f\n"
42 "1: lhi %1,%4\n"
43 "2:\n"
44 EX_TABLE(0b, 1b)
45 : "+d" (r0), "+d" (rc)
46 : "d" (r1), "a" (sysinfo), "K" (-EOPNOTSUPP)
47 : "cc", "memory");
48 if (rc)
49 return rc;
50 return fc ? 0 : ((unsigned int) r0) >> 28;
29} 51}
52EXPORT_SYMBOL(stsi);
30 53
31static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len) 54static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info)
32{ 55{
33 if (stsi(info, 1, 1, 1) == -ENOSYS) 56 int i;
34 return len;
35 57
58 if (stsi(info, 1, 1, 1))
59 return;
36 EBCASC(info->manufacturer, sizeof(info->manufacturer)); 60 EBCASC(info->manufacturer, sizeof(info->manufacturer));
37 EBCASC(info->type, sizeof(info->type)); 61 EBCASC(info->type, sizeof(info->type));
38 EBCASC(info->model, sizeof(info->model)); 62 EBCASC(info->model, sizeof(info->model));
@@ -41,242 +65,197 @@ static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len)
41 EBCASC(info->model_capacity, sizeof(info->model_capacity)); 65 EBCASC(info->model_capacity, sizeof(info->model_capacity));
42 EBCASC(info->model_perm_cap, sizeof(info->model_perm_cap)); 66 EBCASC(info->model_perm_cap, sizeof(info->model_perm_cap));
43 EBCASC(info->model_temp_cap, sizeof(info->model_temp_cap)); 67 EBCASC(info->model_temp_cap, sizeof(info->model_temp_cap));
44 len += sprintf(page + len, "Manufacturer: %-16.16s\n", 68 seq_printf(m, "Manufacturer: %-16.16s\n", info->manufacturer);
45 info->manufacturer); 69 seq_printf(m, "Type: %-4.4s\n", info->type);
46 len += sprintf(page + len, "Type: %-4.4s\n", 70 /*
47 info->type); 71 * Sigh: the model field has been renamed with System z9
72 * to model_capacity and a new model field has been added
73 * after the plant field. To avoid confusing older programs
74 * the "Model:" prints "model_capacity model" or just
75 * "model_capacity" if the model string is empty .
76 */
77 seq_printf(m, "Model: %-16.16s", info->model_capacity);
48 if (info->model[0] != '\0') 78 if (info->model[0] != '\0')
49 /* 79 seq_printf(m, " %-16.16s", info->model);
50 * Sigh: the model field has been renamed with System z9 80 seq_putc(m, '\n');
51 * to model_capacity and a new model field has been added 81 seq_printf(m, "Sequence Code: %-16.16s\n", info->sequence);
52 * after the plant field. To avoid confusing older programs 82 seq_printf(m, "Plant: %-4.4s\n", info->plant);
53 * the "Model:" prints "model_capacity model" or just 83 seq_printf(m, "Model Capacity: %-16.16s %08u\n",
54 * "model_capacity" if the model string is empty . 84 info->model_capacity, info->model_cap_rating);
55 */ 85 if (info->model_perm_cap_rating)
56 len += sprintf(page + len, 86 seq_printf(m, "Model Perm. Capacity: %-16.16s %08u\n",
57 "Model: %-16.16s %-16.16s\n", 87 info->model_perm_cap,
58 info->model_capacity, info->model); 88 info->model_perm_cap_rating);
59 else 89 if (info->model_temp_cap_rating)
60 len += sprintf(page + len, "Model: %-16.16s\n", 90 seq_printf(m, "Model Temp. Capacity: %-16.16s %08u\n",
61 info->model_capacity); 91 info->model_temp_cap,
62 len += sprintf(page + len, "Sequence Code: %-16.16s\n", 92 info->model_temp_cap_rating);
63 info->sequence); 93 if (info->ncr)
64 len += sprintf(page + len, "Plant: %-4.4s\n", 94 seq_printf(m, "Nominal Cap. Rating: %08u\n", info->ncr);
65 info->plant); 95 if (info->npr)
66 len += sprintf(page + len, "Model Capacity: %-16.16s %08u\n", 96 seq_printf(m, "Nominal Perm. Rating: %08u\n", info->npr);
67 info->model_capacity, *(u32 *) info->model_cap_rating); 97 if (info->ntr)
68 if (info->model_perm_cap[0] != '\0') 98 seq_printf(m, "Nominal Temp. Rating: %08u\n", info->ntr);
69 len += sprintf(page + len,
70 "Model Perm. Capacity: %-16.16s %08u\n",
71 info->model_perm_cap,
72 *(u32 *) info->model_perm_cap_rating);
73 if (info->model_temp_cap[0] != '\0')
74 len += sprintf(page + len,
75 "Model Temp. Capacity: %-16.16s %08u\n",
76 info->model_temp_cap,
77 *(u32 *) info->model_temp_cap_rating);
78 if (info->cai) { 99 if (info->cai) {
79 len += sprintf(page + len, 100 seq_printf(m, "Capacity Adj. Ind.: %d\n", info->cai);
80 "Capacity Adj. Ind.: %d\n", 101 seq_printf(m, "Capacity Ch. Reason: %d\n", info->ccr);
81 info->cai); 102 seq_printf(m, "Capacity Transient: %d\n", info->t);
82 len += sprintf(page + len, "Capacity Ch. Reason: %d\n", 103 }
83 info->ccr); 104 if (info->p) {
105 for (i = 1; i <= ARRAY_SIZE(info->typepct); i++) {
106 seq_printf(m, "Type %d Percentage: %d\n",
107 i, info->typepct[i - 1]);
108 }
84 } 109 }
85 return len;
86} 110}
87 111
88static int stsi_15_1_x(struct sysinfo_15_1_x *info, char *page, int len) 112static void stsi_15_1_x(struct seq_file *m, struct sysinfo_15_1_x *info)
89{ 113{
90 static int max_mnest; 114 static int max_mnest;
91 int i, rc; 115 int i, rc;
92 116
93 len += sprintf(page + len, "\n"); 117 seq_putc(m, '\n');
94 if (!MACHINE_HAS_TOPOLOGY) 118 if (!MACHINE_HAS_TOPOLOGY)
95 return len; 119 return;
96 if (max_mnest) { 120 if (stsi(info, 15, 1, topology_max_mnest))
97 stsi(info, 15, 1, max_mnest); 121 return;
98 } else { 122 seq_printf(m, "CPU Topology HW: ");
99 for (max_mnest = 6; max_mnest > 1; max_mnest--) {
100 rc = stsi(info, 15, 1, max_mnest);
101 if (rc != -ENOSYS)
102 break;
103 }
104 }
105 len += sprintf(page + len, "CPU Topology HW: ");
106 for (i = 0; i < TOPOLOGY_NR_MAG; i++) 123 for (i = 0; i < TOPOLOGY_NR_MAG; i++)
107 len += sprintf(page + len, " %d", info->mag[i]); 124 seq_printf(m, " %d", info->mag[i]);
108 len += sprintf(page + len, "\n"); 125 seq_putc(m, '\n');
109#ifdef CONFIG_SCHED_MC 126#ifdef CONFIG_SCHED_MC
110 store_topology(info); 127 store_topology(info);
111 len += sprintf(page + len, "CPU Topology SW: "); 128 seq_printf(m, "CPU Topology SW: ");
112 for (i = 0; i < TOPOLOGY_NR_MAG; i++) 129 for (i = 0; i < TOPOLOGY_NR_MAG; i++)
113 len += sprintf(page + len, " %d", info->mag[i]); 130 seq_printf(m, " %d", info->mag[i]);
114 len += sprintf(page + len, "\n"); 131 seq_putc(m, '\n');
115#endif 132#endif
116 return len;
117} 133}
118 134
119static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len) 135static void stsi_1_2_2(struct seq_file *m, struct sysinfo_1_2_2 *info)
120{ 136{
121 struct sysinfo_1_2_2_extension *ext; 137 struct sysinfo_1_2_2_extension *ext;
122 int i; 138 int i;
123 139
124 if (stsi(info, 1, 2, 2) == -ENOSYS) 140 if (stsi(info, 1, 2, 2))
125 return len; 141 return;
126 ext = (struct sysinfo_1_2_2_extension *) 142 ext = (struct sysinfo_1_2_2_extension *)
127 ((unsigned long) info + info->acc_offset); 143 ((unsigned long) info + info->acc_offset);
128 144 seq_printf(m, "CPUs Total: %d\n", info->cpus_total);
129 len += sprintf(page + len, "CPUs Total: %d\n", 145 seq_printf(m, "CPUs Configured: %d\n", info->cpus_configured);
130 info->cpus_total); 146 seq_printf(m, "CPUs Standby: %d\n", info->cpus_standby);
131 len += sprintf(page + len, "CPUs Configured: %d\n", 147 seq_printf(m, "CPUs Reserved: %d\n", info->cpus_reserved);
132 info->cpus_configured); 148 /*
133 len += sprintf(page + len, "CPUs Standby: %d\n", 149 * Sigh 2. According to the specification the alternate
134 info->cpus_standby); 150 * capability field is a 32 bit floating point number
135 len += sprintf(page + len, "CPUs Reserved: %d\n", 151 * if the higher order 8 bits are not zero. Printing
136 info->cpus_reserved); 152 * a floating point number in the kernel is a no-no,
137 153 * always print the number as 32 bit unsigned integer.
138 if (info->format == 1) { 154 * The user-space needs to know about the strange
139 /* 155 * encoding of the alternate cpu capability.
140 * Sigh 2. According to the specification the alternate 156 */
141 * capability field is a 32 bit floating point number 157 seq_printf(m, "Capability: %u", info->capability);
142 * if the higher order 8 bits are not zero. Printing 158 if (info->format == 1)
143 * a floating point number in the kernel is a no-no, 159 seq_printf(m, " %u", ext->alt_capability);
144 * always print the number as 32 bit unsigned integer. 160 seq_putc(m, '\n');
145 * The user-space needs to know about the strange 161 if (info->nominal_cap)
146 * encoding of the alternate cpu capability. 162 seq_printf(m, "Nominal Capability: %d\n", info->nominal_cap);
147 */ 163 if (info->secondary_cap)
148 len += sprintf(page + len, "Capability: %u %u\n", 164 seq_printf(m, "Secondary Capability: %d\n", info->secondary_cap);
149 info->capability, ext->alt_capability); 165 for (i = 2; i <= info->cpus_total; i++) {
150 for (i = 2; i <= info->cpus_total; i++) 166 seq_printf(m, "Adjustment %02d-way: %u",
151 len += sprintf(page + len, 167 i, info->adjustment[i-2]);
152 "Adjustment %02d-way: %u %u\n", 168 if (info->format == 1)
153 i, info->adjustment[i-2], 169 seq_printf(m, " %u", ext->alt_adjustment[i-2]);
154 ext->alt_adjustment[i-2]); 170 seq_putc(m, '\n');
155
156 } else {
157 len += sprintf(page + len, "Capability: %u\n",
158 info->capability);
159 for (i = 2; i <= info->cpus_total; i++)
160 len += sprintf(page + len,
161 "Adjustment %02d-way: %u\n",
162 i, info->adjustment[i-2]);
163 } 171 }
164
165 if (info->secondary_capability != 0)
166 len += sprintf(page + len, "Secondary Capability: %d\n",
167 info->secondary_capability);
168 return len;
169} 172}
170 173
171static int stsi_2_2_2(struct sysinfo_2_2_2 *info, char *page, int len) 174static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
172{ 175{
173 if (stsi(info, 2, 2, 2) == -ENOSYS) 176 if (stsi(info, 2, 2, 2))
174 return len; 177 return;
175
176 EBCASC(info->name, sizeof(info->name)); 178 EBCASC(info->name, sizeof(info->name));
177 179 seq_putc(m, '\n');
178 len += sprintf(page + len, "\n"); 180 seq_printf(m, "LPAR Number: %d\n", info->lpar_number);
179 len += sprintf(page + len, "LPAR Number: %d\n", 181 seq_printf(m, "LPAR Characteristics: ");
180 info->lpar_number);
181
182 len += sprintf(page + len, "LPAR Characteristics: ");
183 if (info->characteristics & LPAR_CHAR_DEDICATED) 182 if (info->characteristics & LPAR_CHAR_DEDICATED)
184 len += sprintf(page + len, "Dedicated "); 183 seq_printf(m, "Dedicated ");
185 if (info->characteristics & LPAR_CHAR_SHARED) 184 if (info->characteristics & LPAR_CHAR_SHARED)
186 len += sprintf(page + len, "Shared "); 185 seq_printf(m, "Shared ");
187 if (info->characteristics & LPAR_CHAR_LIMITED) 186 if (info->characteristics & LPAR_CHAR_LIMITED)
188 len += sprintf(page + len, "Limited "); 187 seq_printf(m, "Limited ");
189 len += sprintf(page + len, "\n"); 188 seq_putc(m, '\n');
190 189 seq_printf(m, "LPAR Name: %-8.8s\n", info->name);
191 len += sprintf(page + len, "LPAR Name: %-8.8s\n", 190 seq_printf(m, "LPAR Adjustment: %d\n", info->caf);
192 info->name); 191 seq_printf(m, "LPAR CPUs Total: %d\n", info->cpus_total);
193 192 seq_printf(m, "LPAR CPUs Configured: %d\n", info->cpus_configured);
194 len += sprintf(page + len, "LPAR Adjustment: %d\n", 193 seq_printf(m, "LPAR CPUs Standby: %d\n", info->cpus_standby);
195 info->caf); 194 seq_printf(m, "LPAR CPUs Reserved: %d\n", info->cpus_reserved);
196 195 seq_printf(m, "LPAR CPUs Dedicated: %d\n", info->cpus_dedicated);
197 len += sprintf(page + len, "LPAR CPUs Total: %d\n", 196 seq_printf(m, "LPAR CPUs Shared: %d\n", info->cpus_shared);
198 info->cpus_total);
199 len += sprintf(page + len, "LPAR CPUs Configured: %d\n",
200 info->cpus_configured);
201 len += sprintf(page + len, "LPAR CPUs Standby: %d\n",
202 info->cpus_standby);
203 len += sprintf(page + len, "LPAR CPUs Reserved: %d\n",
204 info->cpus_reserved);
205 len += sprintf(page + len, "LPAR CPUs Dedicated: %d\n",
206 info->cpus_dedicated);
207 len += sprintf(page + len, "LPAR CPUs Shared: %d\n",
208 info->cpus_shared);
209 return len;
210} 197}
211 198
212static int stsi_3_2_2(struct sysinfo_3_2_2 *info, char *page, int len) 199static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info)
213{ 200{
214 int i; 201 int i;
215 202
216 if (stsi(info, 3, 2, 2) == -ENOSYS) 203 if (stsi(info, 3, 2, 2))
217 return len; 204 return;
218 for (i = 0; i < info->count; i++) { 205 for (i = 0; i < info->count; i++) {
219 EBCASC(info->vm[i].name, sizeof(info->vm[i].name)); 206 EBCASC(info->vm[i].name, sizeof(info->vm[i].name));
220 EBCASC(info->vm[i].cpi, sizeof(info->vm[i].cpi)); 207 EBCASC(info->vm[i].cpi, sizeof(info->vm[i].cpi));
221 len += sprintf(page + len, "\n"); 208 seq_putc(m, '\n');
222 len += sprintf(page + len, "VM%02d Name: %-8.8s\n", 209 seq_printf(m, "VM%02d Name: %-8.8s\n", i, info->vm[i].name);
223 i, info->vm[i].name); 210 seq_printf(m, "VM%02d Control Program: %-16.16s\n", i, info->vm[i].cpi);
224 len += sprintf(page + len, "VM%02d Control Program: %-16.16s\n", 211 seq_printf(m, "VM%02d Adjustment: %d\n", i, info->vm[i].caf);
225 i, info->vm[i].cpi); 212 seq_printf(m, "VM%02d CPUs Total: %d\n", i, info->vm[i].cpus_total);
226 213 seq_printf(m, "VM%02d CPUs Configured: %d\n", i, info->vm[i].cpus_configured);
227 len += sprintf(page + len, "VM%02d Adjustment: %d\n", 214 seq_printf(m, "VM%02d CPUs Standby: %d\n", i, info->vm[i].cpus_standby);
228 i, info->vm[i].caf); 215 seq_printf(m, "VM%02d CPUs Reserved: %d\n", i, info->vm[i].cpus_reserved);
229
230 len += sprintf(page + len, "VM%02d CPUs Total: %d\n",
231 i, info->vm[i].cpus_total);
232 len += sprintf(page + len, "VM%02d CPUs Configured: %d\n",
233 i, info->vm[i].cpus_configured);
234 len += sprintf(page + len, "VM%02d CPUs Standby: %d\n",
235 i, info->vm[i].cpus_standby);
236 len += sprintf(page + len, "VM%02d CPUs Reserved: %d\n",
237 i, info->vm[i].cpus_reserved);
238 } 216 }
239 return len;
240} 217}
241 218
242static int proc_read_sysinfo(char *page, char **start, 219static int sysinfo_show(struct seq_file *m, void *v)
243 off_t off, int count,
244 int *eof, void *data)
245{ 220{
246 unsigned long info = get_zeroed_page(GFP_KERNEL); 221 void *info = (void *)get_zeroed_page(GFP_KERNEL);
247 int level, len; 222 int level;
248 223
249 if (!info) 224 if (!info)
250 return 0; 225 return 0;
251 226 level = stsi(NULL, 0, 0, 0);
252 len = 0;
253 level = stsi_0();
254 if (level >= 1) 227 if (level >= 1)
255 len = stsi_1_1_1((struct sysinfo_1_1_1 *) info, page, len); 228 stsi_1_1_1(m, info);
256
257 if (level >= 1) 229 if (level >= 1)
258 len = stsi_15_1_x((struct sysinfo_15_1_x *) info, page, len); 230 stsi_15_1_x(m, info);
259
260 if (level >= 1) 231 if (level >= 1)
261 len = stsi_1_2_2((struct sysinfo_1_2_2 *) info, page, len); 232 stsi_1_2_2(m, info);
262
263 if (level >= 2) 233 if (level >= 2)
264 len = stsi_2_2_2((struct sysinfo_2_2_2 *) info, page, len); 234 stsi_2_2_2(m, info);
265
266 if (level >= 3) 235 if (level >= 3)
267 len = stsi_3_2_2((struct sysinfo_3_2_2 *) info, page, len); 236 stsi_3_2_2(m, info);
237 free_page((unsigned long)info);
238 return 0;
239}
268 240
269 free_page(info); 241static int sysinfo_open(struct inode *inode, struct file *file)
270 return len; 242{
243 return single_open(file, sysinfo_show, NULL);
271} 244}
272 245
273static __init int create_proc_sysinfo(void) 246static const struct file_operations sysinfo_fops = {
247 .open = sysinfo_open,
248 .read = seq_read,
249 .llseek = seq_lseek,
250 .release = single_release,
251};
252
253static int __init sysinfo_create_proc(void)
274{ 254{
275 create_proc_read_entry("sysinfo", 0444, NULL, 255 proc_create("sysinfo", 0444, NULL, &sysinfo_fops);
276 proc_read_sysinfo, NULL);
277 return 0; 256 return 0;
278} 257}
279device_initcall(create_proc_sysinfo); 258device_initcall(sysinfo_create_proc);
280 259
281/* 260/*
282 * Service levels interface. 261 * Service levels interface.
@@ -407,7 +386,7 @@ void s390_adjust_jiffies(void)
407 if (!info) 386 if (!info)
408 return; 387 return;
409 388
410 if (stsi(info, 1, 2, 2) != -ENOSYS) { 389 if (stsi(info, 1, 2, 2) == 0) {
411 /* 390 /*
412 * Major sigh. The cpu capability encoding is "special". 391 * Major sigh. The cpu capability encoding is "special".
413 * If the first 9 bits of info->capability are 0 then it 392 * If the first 9 bits of info->capability are 0 then it
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index dcec960fc724..2db1011b8b19 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -329,7 +329,7 @@ static unsigned long clock_sync_flags;
329 * The synchronous get_clock function. It will write the current clock 329 * The synchronous get_clock function. It will write the current clock
330 * value to the clock pointer and return 0 if the clock is in sync with 330 * value to the clock pointer and return 0 if the clock is in sync with
331 * the external time source. If the clock mode is local it will return 331 * the external time source. If the clock mode is local it will return
332 * -ENOSYS and -EAGAIN if the clock is not in sync with the external 332 * -EOPNOTSUPP and -EAGAIN if the clock is not in sync with the external
333 * reference. 333 * reference.
334 */ 334 */
335int get_sync_clock(unsigned long long *clock) 335int get_sync_clock(unsigned long long *clock)
@@ -347,7 +347,7 @@ int get_sync_clock(unsigned long long *clock)
347 return 0; 347 return 0;
348 if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) && 348 if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) &&
349 !test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) 349 !test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
350 return -ENOSYS; 350 return -EOPNOTSUPP;
351 if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) && 351 if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) &&
352 !test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) 352 !test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
353 return -EACCES; 353 return -EACCES;
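
With this change callers of get_sync_clock() see -EOPNOTSUPP when the clock runs in local mode (no ETR/STP reference) instead of -ENOSYS, while -EAGAIN and -EACCES still mean the clock is not currently usable as a synchronized source. A small kernel-side sketch, assuming the prototype that has historically been declared in asm/timex.h:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <asm/timex.h>

/* Sketch: read the TOD clock only when it is externally synchronized. */
static int example_read_synced_clock(unsigned long long *tod)
{
	int rc = get_sync_clock(tod);

	if (rc == -EOPNOTSUPP)
		pr_debug("clock runs in local mode, no external reference\n");
	else if (rc == -EAGAIN || rc == -EACCES)
		pr_debug("clock not in sync with the external reference\n");
	return rc;
}
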
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 05151e06c388..54d93f4b6818 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -17,6 +17,7 @@
17#include <linux/cpu.h> 17#include <linux/cpu.h>
18#include <linux/smp.h> 18#include <linux/smp.h>
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <asm/sysinfo.h>
20 21
21#define PTF_HORIZONTAL (0UL) 22#define PTF_HORIZONTAL (0UL)
22#define PTF_VERTICAL (1UL) 23#define PTF_VERTICAL (1UL)
@@ -44,9 +45,6 @@ static struct mask_info book_info;
44cpumask_t cpu_book_map[NR_CPUS]; 45cpumask_t cpu_book_map[NR_CPUS];
45unsigned char cpu_book_id[NR_CPUS]; 46unsigned char cpu_book_id[NR_CPUS];
46 47
47/* smp_cpu_state_mutex must be held when accessing this array */
48int cpu_polarization[NR_CPUS];
49
50static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) 48static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
51{ 49{
52 cpumask_t mask; 50 cpumask_t mask;
@@ -75,10 +73,7 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
75{ 73{
76 unsigned int cpu; 74 unsigned int cpu;
77 75
78 for (cpu = find_first_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS); 76 for_each_set_bit(cpu, &tl_cpu->mask[0], TOPOLOGY_CPU_BITS) {
79 cpu < TOPOLOGY_CPU_BITS;
80 cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1))
81 {
82 unsigned int rcpu; 77 unsigned int rcpu;
83 int lcpu; 78 int lcpu;
84 79
@@ -94,7 +89,7 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
94 } else { 89 } else {
95 cpu_core_id[lcpu] = core->id; 90 cpu_core_id[lcpu] = core->id;
96 } 91 }
97 cpu_set_polarization(lcpu, tl_cpu->pp); 92 smp_cpu_set_polarization(lcpu, tl_cpu->pp);
98 } 93 }
99 } 94 }
100 return core; 95 return core;
@@ -201,7 +196,7 @@ static void topology_update_polarization_simple(void)
201 196
202 mutex_lock(&smp_cpu_state_mutex); 197 mutex_lock(&smp_cpu_state_mutex);
203 for_each_possible_cpu(cpu) 198 for_each_possible_cpu(cpu)
204 cpu_set_polarization(cpu, POLARIZATION_HRZ); 199 smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
205 mutex_unlock(&smp_cpu_state_mutex); 200 mutex_unlock(&smp_cpu_state_mutex);
206} 201}
207 202
@@ -231,7 +226,7 @@ int topology_set_cpu_management(int fc)
231 if (rc) 226 if (rc)
232 return -EBUSY; 227 return -EBUSY;
233 for_each_possible_cpu(cpu) 228 for_each_possible_cpu(cpu)
234 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 229 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
235 return rc; 230 return rc;
236} 231}
237 232
@@ -250,12 +245,10 @@ static void update_cpu_core_map(void)
250 245
251void store_topology(struct sysinfo_15_1_x *info) 246void store_topology(struct sysinfo_15_1_x *info)
252{ 247{
253 int rc; 248 if (topology_max_mnest >= 3)
254 249 stsi(info, 15, 1, 3);
255 rc = stsi(info, 15, 1, 3); 250 else
256 if (rc != -ENOSYS) 251 stsi(info, 15, 1, 2);
257 return;
258 stsi(info, 15, 1, 2);
259} 252}
260 253
261int arch_update_cpu_topology(void) 254int arch_update_cpu_topology(void)
@@ -415,7 +408,7 @@ static ssize_t cpu_polarization_show(struct device *dev,
415 ssize_t count; 408 ssize_t count;
416 409
417 mutex_lock(&smp_cpu_state_mutex); 410 mutex_lock(&smp_cpu_state_mutex);
418 switch (cpu_read_polarization(cpu)) { 411 switch (smp_cpu_get_polarization(cpu)) {
419 case POLARIZATION_HRZ: 412 case POLARIZATION_HRZ:
420 count = sprintf(buf, "horizontal\n"); 413 count = sprintf(buf, "horizontal\n");
421 break; 414 break;
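
The add_cpus_to_mask() cleanup near the top of this file's diff replaces an open-coded find_first_bit()/find_next_bit() loop with the for_each_set_bit() helper from linux/bitops.h. A tiny sketch of the idiom with a placeholder body:

#include <linux/bitops.h>
#include <linux/kernel.h>

/* Walk every set bit in a bitmap of the given length (in bits). */
static void example_walk_mask(const unsigned long *mask, unsigned int bits)
{
	unsigned int bit;

	for_each_set_bit(bit, mask, bits)
		pr_debug("bit %u is set\n", bit);
}
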
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 01775c04a90e..3d2b0fa37db0 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -57,6 +57,23 @@ static int kstack_depth_to_print = 12;
57static int kstack_depth_to_print = 20; 57static int kstack_depth_to_print = 20;
58#endif /* CONFIG_64BIT */ 58#endif /* CONFIG_64BIT */
59 59
60static inline void __user *get_trap_ip(struct pt_regs *regs)
61{
62#ifdef CONFIG_64BIT
63 unsigned long address;
64
65 if (regs->int_code & 0x200)
66 address = *(unsigned long *)(current->thread.trap_tdb + 24);
67 else
68 address = regs->psw.addr;
69 return (void __user *)
70 ((address - (regs->int_code >> 16)) & PSW_ADDR_INSN);
71#else
72 return (void __user *)
73 ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
74#endif
75}
76
60/* 77/*
61 * For show_trace we have tree different stack to consider: 78 * For show_trace we have tree different stack to consider:
62 * - the panic stack which is used if the kernel stack has overflown 79 * - the panic stack which is used if the kernel stack has overflown
@@ -214,7 +231,6 @@ void show_registers(struct pt_regs *regs)
214 231
215void show_regs(struct pt_regs *regs) 232void show_regs(struct pt_regs *regs)
216{ 233{
217 print_modules();
218 printk("CPU: %d %s %s %.*s\n", 234 printk("CPU: %d %s %s %.*s\n",
219 task_thread_info(current)->cpu, print_tainted(), 235 task_thread_info(current)->cpu, print_tainted(),
220 init_utsname()->release, 236 init_utsname()->release,
@@ -254,6 +270,7 @@ void die(struct pt_regs *regs, const char *str)
254#endif 270#endif
255 printk("\n"); 271 printk("\n");
256 notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV); 272 notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
273 print_modules();
257 show_regs(regs); 274 show_regs(regs);
258 bust_spinlocks(0); 275 bust_spinlocks(0);
259 add_taint(TAINT_DIE); 276 add_taint(TAINT_DIE);
@@ -285,12 +302,6 @@ int is_valid_bugaddr(unsigned long addr)
285 return 1; 302 return 1;
286} 303}
287 304
288static inline void __user *get_psw_address(struct pt_regs *regs)
289{
290 return (void __user *)
291 ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
292}
293
294static void __kprobes do_trap(struct pt_regs *regs, 305static void __kprobes do_trap(struct pt_regs *regs,
295 int si_signo, int si_code, char *str) 306 int si_signo, int si_code, char *str)
296{ 307{
@@ -304,14 +315,14 @@ static void __kprobes do_trap(struct pt_regs *regs,
304 info.si_signo = si_signo; 315 info.si_signo = si_signo;
305 info.si_errno = 0; 316 info.si_errno = 0;
306 info.si_code = si_code; 317 info.si_code = si_code;
307 info.si_addr = get_psw_address(regs); 318 info.si_addr = get_trap_ip(regs);
308 force_sig_info(si_signo, &info, current); 319 force_sig_info(si_signo, &info, current);
309 report_user_fault(regs, si_signo); 320 report_user_fault(regs, si_signo);
310 } else { 321 } else {
311 const struct exception_table_entry *fixup; 322 const struct exception_table_entry *fixup;
312 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); 323 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
313 if (fixup) 324 if (fixup)
314 regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; 325 regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
315 else { 326 else {
316 enum bug_trap_type btt; 327 enum bug_trap_type btt;
317 328
@@ -381,6 +392,11 @@ DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
381DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN, 392DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
382 "translation exception") 393 "translation exception")
383 394
395#ifdef CONFIG_64BIT
396DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN,
397 "transaction constraint exception")
398#endif
399
384static inline void do_fp_trap(struct pt_regs *regs, int fpc) 400static inline void do_fp_trap(struct pt_regs *regs, int fpc)
385{ 401{
386 int si_code = 0; 402 int si_code = 0;
@@ -408,7 +424,7 @@ static void __kprobes illegal_op(struct pt_regs *regs)
408 __u16 __user *location; 424 __u16 __user *location;
409 int signal = 0; 425 int signal = 0;
410 426
411 location = get_psw_address(regs); 427 location = get_trap_ip(regs);
412 428
413 if (user_mode(regs)) { 429 if (user_mode(regs)) {
414 if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) 430 if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
@@ -476,7 +492,7 @@ void specification_exception(struct pt_regs *regs)
476 __u16 __user *location = NULL; 492 __u16 __user *location = NULL;
477 int signal = 0; 493 int signal = 0;
478 494
479 location = (__u16 __user *) get_psw_address(regs); 495 location = (__u16 __user *) get_trap_ip(regs);
480 496
481 if (user_mode(regs)) { 497 if (user_mode(regs)) {
482 get_user(*((__u16 *) opcode), location); 498 get_user(*((__u16 *) opcode), location);
@@ -525,7 +541,7 @@ static void data_exception(struct pt_regs *regs)
525 __u16 __user *location; 541 __u16 __user *location;
526 int signal = 0; 542 int signal = 0;
527 543
528 location = get_psw_address(regs); 544 location = get_trap_ip(regs);
529 545
530 if (MACHINE_HAS_IEEE) 546 if (MACHINE_HAS_IEEE)
531 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 547 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
@@ -641,6 +657,7 @@ void __init trap_init(void)
641 pgm_check_table[0x12] = &translation_exception; 657 pgm_check_table[0x12] = &translation_exception;
642 pgm_check_table[0x13] = &special_op_exception; 658 pgm_check_table[0x13] = &special_op_exception;
643#ifdef CONFIG_64BIT 659#ifdef CONFIG_64BIT
660 pgm_check_table[0x18] = &transaction_exception;
644 pgm_check_table[0x38] = &do_asce_exception; 661 pgm_check_table[0x38] = &do_asce_exception;
645 pgm_check_table[0x39] = &do_dat_exception; 662 pgm_check_table[0x39] = &do_dat_exception;
646 pgm_check_table[0x3A] = &do_dat_exception; 663 pgm_check_table[0x3A] = &do_dat_exception;
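
With program check 0x18 wired to transaction_exception above, a transaction constraint violation reaches user space as SIGILL with si_code ILL_ILLOPN and si_addr pointing at the failing instruction as computed by get_trap_ip(). A hedged user-space sketch of catching it; printing from a signal handler is not async-signal-safe and is done here only for illustration:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void ill_handler(int sig, siginfo_t *info, void *ctx)
{
	(void)sig;
	(void)ctx;
	if (info->si_code == ILL_ILLOPN)
		fprintf(stderr, "illegal operand at %p\n", info->si_addr);
	_exit(1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = ill_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGILL, &sa, NULL);
	/* ... code that might raise a transaction constraint exception ... */
	return 0;
}
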
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 9a19ca367c17..d7776281cb60 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -85,7 +85,7 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
85static void vdso_init_data(struct vdso_data *vd) 85static void vdso_init_data(struct vdso_data *vd)
86{ 86{
87 vd->ectg_available = 87 vd->ectg_available =
88 addressing_mode != HOME_SPACE_MODE && test_facility(31); 88 s390_user_mode != HOME_SPACE_MODE && test_facility(31);
89} 89}
90 90
91#ifdef CONFIG_64BIT 91#ifdef CONFIG_64BIT
@@ -102,7 +102,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
102 102
103 lowcore->vdso_per_cpu_data = __LC_PASTE; 103 lowcore->vdso_per_cpu_data = __LC_PASTE;
104 104
105 if (addressing_mode == HOME_SPACE_MODE || !vdso_enabled) 105 if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
106 return 0; 106 return 0;
107 107
108 segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER); 108 segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
@@ -147,7 +147,7 @@ void vdso_free_per_cpu(struct _lowcore *lowcore)
147 unsigned long segment_table, page_table, page_frame; 147 unsigned long segment_table, page_table, page_frame;
148 u32 *psal, *aste; 148 u32 *psal, *aste;
149 149
150 if (addressing_mode == HOME_SPACE_MODE || !vdso_enabled) 150 if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
151 return; 151 return;
152 152
153 psal = (u32 *)(addr_t) lowcore->paste[4]; 153 psal = (u32 *)(addr_t) lowcore->paste[4];
@@ -165,7 +165,7 @@ static void vdso_init_cr5(void)
165{ 165{
166 unsigned long cr5; 166 unsigned long cr5;
167 167
168 if (addressing_mode == HOME_SPACE_MODE || !vdso_enabled) 168 if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
169 return; 169 return;
170 cr5 = offsetof(struct _lowcore, paste); 170 cr5 = offsetof(struct _lowcore, paste);
171 __ctl_load(cr5, 5, 5); 171 __ctl_load(cr5, 5, 5);
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 4fc97b40a6e1..790334427895 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -99,7 +99,7 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
99 return virt_timer_forward(user + system); 99 return virt_timer_forward(user + system);
100} 100}
101 101
102void account_vtime(struct task_struct *prev, struct task_struct *next) 102void vtime_task_switch(struct task_struct *prev)
103{ 103{
104 struct thread_info *ti; 104 struct thread_info *ti;
105 105
@@ -107,7 +107,7 @@ void account_vtime(struct task_struct *prev, struct task_struct *next)
107 ti = task_thread_info(prev); 107 ti = task_thread_info(prev);
108 ti->user_timer = S390_lowcore.user_timer; 108 ti->user_timer = S390_lowcore.user_timer;
109 ti->system_timer = S390_lowcore.system_timer; 109 ti->system_timer = S390_lowcore.system_timer;
110 ti = task_thread_info(next); 110 ti = task_thread_info(current);
111 S390_lowcore.user_timer = ti->user_timer; 111 S390_lowcore.user_timer = ti->user_timer;
112 S390_lowcore.system_timer = ti->system_timer; 112 S390_lowcore.system_timer = ti->system_timer;
113} 113}
@@ -122,7 +122,7 @@ void account_process_tick(struct task_struct *tsk, int user_tick)
122 * Update process times based on virtual cpu times stored by entry.S 122 * Update process times based on virtual cpu times stored by entry.S
123 * to the lowcore fields user_timer, system_timer & steal_clock. 123 * to the lowcore fields user_timer, system_timer & steal_clock.
124 */ 124 */
125void account_system_vtime(struct task_struct *tsk) 125void vtime_account(struct task_struct *tsk)
126{ 126{
127 struct thread_info *ti = task_thread_info(tsk); 127 struct thread_info *ti = task_thread_info(tsk);
128 u64 timer, system; 128 u64 timer, system;
@@ -138,7 +138,7 @@ void account_system_vtime(struct task_struct *tsk)
138 138
139 virt_timer_forward(system); 139 virt_timer_forward(system);
140} 140}
141EXPORT_SYMBOL_GPL(account_system_vtime); 141EXPORT_SYMBOL_GPL(vtime_account);
142 142
143void __kprobes vtime_stop_cpu(void) 143void __kprobes vtime_stop_cpu(void)
144{ 144{
@@ -378,9 +378,8 @@ static int __cpuinit s390_nohz_notify(struct notifier_block *self,
378 long cpu = (long) hcpu; 378 long cpu = (long) hcpu;
379 379
380 idle = &per_cpu(s390_idle, cpu); 380 idle = &per_cpu(s390_idle, cpu);
381 switch (action) { 381 switch (action & ~CPU_TASKS_FROZEN) {
382 case CPU_DYING: 382 case CPU_DYING:
383 case CPU_DYING_FROZEN:
384 idle->nohz_delay = 0; 383 idle->nohz_delay = 0;
385 default: 384 default:
386 break; 385 break;
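
The vtime.c hunks above hook s390 into the generic vtime_task_switch()/vtime_account() names and collapse the frozen-suspend notifier cases by masking. A minimal sketch of that notifier pattern, with a hypothetical my_nohz_notify() callback (the name and body are illustrative, not taken from the patch):

#include <linux/cpu.h>
#include <linux/notifier.h>

/*
 * Masking out CPU_TASKS_FROZEN lets a single case label cover both
 * CPU_DYING and CPU_DYING_FROZEN, which is what the change above does
 * instead of listing the _FROZEN variant separately.
 */
static int my_nohz_notify(struct notifier_block *self,
			  unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DYING:
		/* per-cpu cleanup for (long) hcpu would go here */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

The same masking idiom appears again in the pfault_cpu_notify() hunk in arch/s390/mm/fault.c further down.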
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 78eb9847008f..9b04a32e5695 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -5,7 +5,7 @@ source "virt/kvm/Kconfig"
5 5
6menuconfig VIRTUALIZATION 6menuconfig VIRTUALIZATION
7 def_bool y 7 def_bool y
8 prompt "Virtualization" 8 prompt "KVM"
9 ---help--- 9 ---help---
10 Say Y here to get to see options for using your Linux host to run other 10 Say Y here to get to see options for using your Linux host to run other
11 operating systems inside virtual machines (guests). 11 operating systems inside virtual machines (guests).
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 60da903d6f3e..310be61bead7 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -211,7 +211,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
211 spin_unlock(&fi->lock); 211 spin_unlock(&fi->lock);
212 212
213 /* deal with other level 3 hypervisors */ 213 /* deal with other level 3 hypervisors */
214 if (stsi(mem, 3, 2, 2) == -ENOSYS) 214 if (stsi(mem, 3, 2, 2))
215 mem->count = 0; 215 mem->count = 0;
216 if (mem->count < 8) 216 if (mem->count < 8)
217 mem->count++; 217 mem->count++;
@@ -259,7 +259,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
259 mem = get_zeroed_page(GFP_KERNEL); 259 mem = get_zeroed_page(GFP_KERNEL);
260 if (!mem) 260 if (!mem)
261 goto out_fail; 261 goto out_fail;
262 if (stsi((void *) mem, fc, sel1, sel2) == -ENOSYS) 262 if (stsi((void *) mem, fc, sel1, sel2))
263 goto out_mem; 263 goto out_mem;
264 break; 264 break;
265 case 3: 265 case 3:
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index 761ab8b56afc..6ab0d0b5cec8 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -4,6 +4,7 @@
4 4
5lib-y += delay.o string.o uaccess_std.o uaccess_pt.o 5lib-y += delay.o string.o uaccess_std.o uaccess_pt.o
6obj-y += usercopy.o 6obj-y += usercopy.o
7obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o 7obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
8obj-$(CONFIG_64BIT) += mem64.o
8lib-$(CONFIG_64BIT) += uaccess_mvcos.o 9lib-$(CONFIG_64BIT) += uaccess_mvcos.o
9lib-$(CONFIG_SMP) += spinlock.o 10lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/mem32.S b/arch/s390/lib/mem32.S
new file mode 100644
index 000000000000..14ca9244b615
--- /dev/null
+++ b/arch/s390/lib/mem32.S
@@ -0,0 +1,92 @@
1/*
2 * String handling functions.
3 *
4 * Copyright IBM Corp. 2012
5 */
6
7#include <linux/linkage.h>
8
9/*
10 * memset implementation
11 *
12 * This code corresponds to the C construct below. We do distinguish
13 * between clearing (c == 0) and setting a memory array (c != 0) simply
14 * because nearly all memset invocations in the kernel clear memory and
15 * the xc instruction is preferred in such cases.
16 *
17 * void *memset(void *s, int c, size_t n)
18 * {
19 * if (likely(c == 0))
20 * return __builtin_memset(s, 0, n);
21 * return __builtin_memset(s, c, n);
22 * }
23 */
24ENTRY(memset)
25 basr %r5,%r0
26.Lmemset_base:
27 ltr %r4,%r4
28 bzr %r14
29 ltr %r3,%r3
30 jnz .Lmemset_fill
31 ahi %r4,-1
32 lr %r3,%r4
33 srl %r3,8
34 ltr %r3,%r3
35 lr %r1,%r2
36 je .Lmemset_clear_rest
37.Lmemset_clear_loop:
38 xc 0(256,%r1),0(%r1)
39 la %r1,256(%r1)
40 brct %r3,.Lmemset_clear_loop
41.Lmemset_clear_rest:
42 ex %r4,.Lmemset_xc-.Lmemset_base(%r5)
43 br %r14
44.Lmemset_fill:
45 stc %r3,0(%r2)
46 chi %r4,1
47 lr %r1,%r2
48 ber %r14
49 ahi %r4,-2
50 lr %r3,%r4
51 srl %r3,8
52 ltr %r3,%r3
53 je .Lmemset_fill_rest
54.Lmemset_fill_loop:
55 mvc 1(256,%r1),0(%r1)
56 la %r1,256(%r1)
57 brct %r3,.Lmemset_fill_loop
58.Lmemset_fill_rest:
59 ex %r4,.Lmemset_mvc-.Lmemset_base(%r5)
60 br %r14
61.Lmemset_xc:
62 xc 0(1,%r1),0(%r1)
63.Lmemset_mvc:
64 mvc 1(1,%r1),0(%r1)
65
66/*
67 * memcpy implementation
68 *
69 * void *memcpy(void *dest, const void *src, size_t n)
70 */
71ENTRY(memcpy)
72 basr %r5,%r0
73.Lmemcpy_base:
74 ltr %r4,%r4
75 bzr %r14
76 ahi %r4,-1
77 lr %r0,%r4
78 srl %r0,8
79 ltr %r0,%r0
80 lr %r1,%r2
81 jnz .Lmemcpy_loop
82.Lmemcpy_rest:
83 ex %r4,.Lmemcpy_mvc-.Lmemcpy_base(%r5)
84 br %r14
85.Lmemcpy_loop:
86 mvc 0(256,%r1),0(%r3)
87 la %r1,256(%r1)
88 la %r3,256(%r3)
89 brct %r0,.Lmemcpy_loop
90 j .Lmemcpy_rest
91.Lmemcpy_mvc:
92 mvc 0(1,%r1),0(%r3)
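
For reference, the structure of the new memset can be restated in C: full 256-byte blocks are handled by the xc/mvc loop, the 0..255 byte tail by a single EX-executed xc or mvc whose length field is patched from the remaining count, and the non-zero fill works by seeding the first byte and letting mvc 1(len,%r1),0(%r1) propagate it forward one byte at a time. memset_sketch() below only illustrates that control flow under those assumptions; it is not a replacement for the assembly:

#include <stddef.h>

static void *memset_sketch(void *s, int c, size_t n)
{
	unsigned char *p = s;
	size_t i;

	if (n == 0)
		return s;
	if (c == 0) {			/* xc path: clear, 256 bytes per iteration */
		for (i = 0; i < n; i++)
			p[i] = 0;
		return s;
	}
	p[0] = (unsigned char) c;	/* stc: seed the first byte */
	for (i = 1; i < n; i++)		/* mvc 1(...),0(...): copy byte i-1 to byte i */
		p[i] = p[i - 1];
	return s;
}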
diff --git a/arch/s390/lib/mem64.S b/arch/s390/lib/mem64.S
new file mode 100644
index 000000000000..c6d553e85ab1
--- /dev/null
+++ b/arch/s390/lib/mem64.S
@@ -0,0 +1,88 @@
1/*
2 * String handling functions.
3 *
4 * Copyright IBM Corp. 2012
5 */
6
7#include <linux/linkage.h>
8
9/*
10 * memset implementation
11 *
12 * This code corresponds to the C construct below. We do distinguish
13 * between clearing (c == 0) and setting a memory array (c != 0) simply
14 * because nearly all memset invocations in the kernel clear memory and
15 * the xc instruction is preferred in such cases.
16 *
17 * void *memset(void *s, int c, size_t n)
18 * {
19 * if (likely(c == 0))
20 * return __builtin_memset(s, 0, n);
21 * return __builtin_memset(s, c, n);
22 * }
23 */
24ENTRY(memset)
25 ltgr %r4,%r4
26 bzr %r14
27 ltgr %r3,%r3
28 jnz .Lmemset_fill
29 aghi %r4,-1
30 srlg %r3,%r4,8
31 ltgr %r3,%r3
32 lgr %r1,%r2
33 jz .Lmemset_clear_rest
34.Lmemset_clear_loop:
35 xc 0(256,%r1),0(%r1)
36 la %r1,256(%r1)
37 brctg %r3,.Lmemset_clear_loop
38.Lmemset_clear_rest:
39 larl %r3,.Lmemset_xc
40 ex %r4,0(%r3)
41 br %r14
42.Lmemset_fill:
43 stc %r3,0(%r2)
44 cghi %r4,1
45 lgr %r1,%r2
46 ber %r14
47 aghi %r4,-2
48 srlg %r3,%r4,8
49 ltgr %r3,%r3
50 jz .Lmemset_fill_rest
51.Lmemset_fill_loop:
52 mvc 1(256,%r1),0(%r1)
53 la %r1,256(%r1)
54 brctg %r3,.Lmemset_fill_loop
55.Lmemset_fill_rest:
56 larl %r3,.Lmemset_mvc
57 ex %r4,0(%r3)
58 br %r14
59.Lmemset_xc:
60 xc 0(1,%r1),0(%r1)
61.Lmemset_mvc:
62 mvc 1(1,%r1),0(%r1)
63
64/*
65 * memcpy implementation
66 *
67 * void *memcpy(void *dest, const void *src, size_t n)
68 */
69ENTRY(memcpy)
70 ltgr %r4,%r4
71 bzr %r14
72 aghi %r4,-1
73 srlg %r5,%r4,8
74 ltgr %r5,%r5
75 lgr %r1,%r2
76 jnz .Lmemcpy_loop
77.Lmemcpy_rest:
78 larl %r5,.Lmemcpy_mvc
79 ex %r4,0(%r5)
80 br %r14
81.Lmemcpy_loop:
82 mvc 0(256,%r1),0(%r3)
83 la %r1,256(%r1)
84 la %r3,256(%r3)
85 brctg %r5,.Lmemcpy_loop
86 j .Lmemcpy_rest
87.Lmemcpy_mvc:
88 mvc 0(1,%r1),0(%r3)
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index 846ec64ab2c9..b647d5ff0ad9 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -43,11 +43,7 @@ static inline char *__strnend(const char *s, size_t n)
43 */ 43 */
44size_t strlen(const char *s) 44size_t strlen(const char *s)
45{ 45{
46#if __GNUC__ < 4
47 return __strend(s) - s; 46 return __strend(s) - s;
48#else
49 return __builtin_strlen(s);
50#endif
51} 47}
52EXPORT_SYMBOL(strlen); 48EXPORT_SYMBOL(strlen);
53 49
@@ -73,7 +69,6 @@ EXPORT_SYMBOL(strnlen);
73 */ 69 */
74char *strcpy(char *dest, const char *src) 70char *strcpy(char *dest, const char *src)
75{ 71{
76#if __GNUC__ < 4
77 register int r0 asm("0") = 0; 72 register int r0 asm("0") = 0;
78 char *ret = dest; 73 char *ret = dest;
79 74
@@ -82,9 +77,6 @@ char *strcpy(char *dest, const char *src)
82 : "+&a" (dest), "+&a" (src) : "d" (r0) 77 : "+&a" (dest), "+&a" (src) : "d" (r0)
83 : "cc", "memory" ); 78 : "cc", "memory" );
84 return ret; 79 return ret;
85#else
86 return __builtin_strcpy(dest, src);
87#endif
88} 80}
89EXPORT_SYMBOL(strcpy); 81EXPORT_SYMBOL(strcpy);
90 82
@@ -106,7 +98,7 @@ size_t strlcpy(char *dest, const char *src, size_t size)
106 if (size) { 98 if (size) {
107 size_t len = (ret >= size) ? size-1 : ret; 99 size_t len = (ret >= size) ? size-1 : ret;
108 dest[len] = '\0'; 100 dest[len] = '\0';
109 __builtin_memcpy(dest, src, len); 101 memcpy(dest, src, len);
110 } 102 }
111 return ret; 103 return ret;
112} 104}
@@ -124,8 +116,8 @@ EXPORT_SYMBOL(strlcpy);
124char *strncpy(char *dest, const char *src, size_t n) 116char *strncpy(char *dest, const char *src, size_t n)
125{ 117{
126 size_t len = __strnend(src, n) - src; 118 size_t len = __strnend(src, n) - src;
127 __builtin_memset(dest + len, 0, n - len); 119 memset(dest + len, 0, n - len);
128 __builtin_memcpy(dest, src, len); 120 memcpy(dest, src, len);
129 return dest; 121 return dest;
130} 122}
131EXPORT_SYMBOL(strncpy); 123EXPORT_SYMBOL(strncpy);
@@ -171,7 +163,7 @@ size_t strlcat(char *dest, const char *src, size_t n)
171 if (len >= n) 163 if (len >= n)
172 len = n - 1; 164 len = n - 1;
173 dest[len] = '\0'; 165 dest[len] = '\0';
174 __builtin_memcpy(dest, src, len); 166 memcpy(dest, src, len);
175 } 167 }
176 return res; 168 return res;
177} 169}
@@ -194,7 +186,7 @@ char *strncat(char *dest, const char *src, size_t n)
194 char *p = __strend(dest); 186 char *p = __strend(dest);
195 187
196 p[len] = '\0'; 188 p[len] = '\0';
197 __builtin_memcpy(p, src, len); 189 memcpy(p, src, len);
198 return dest; 190 return dest;
199} 191}
200EXPORT_SYMBOL(strncat); 192EXPORT_SYMBOL(strncat);
@@ -348,41 +340,3 @@ void *memscan(void *s, int c, size_t n)
348 return (void *) ret; 340 return (void *) ret;
349} 341}
350EXPORT_SYMBOL(memscan); 342EXPORT_SYMBOL(memscan);
351
352/**
353 * memcpy - Copy one area of memory to another
354 * @dest: Where to copy to
355 * @src: Where to copy from
356 * @n: The size of the area.
357 *
358 * returns a pointer to @dest
359 */
360void *memcpy(void *dest, const void *src, size_t n)
361{
362 return __builtin_memcpy(dest, src, n);
363}
364EXPORT_SYMBOL(memcpy);
365
366/**
367 * memset - Fill a region of memory with the given value
368 * @s: Pointer to the start of the area.
369 * @c: The byte to fill the area with
370 * @n: The size of the area.
371 *
372 * returns a pointer to @s
373 */
374void *memset(void *s, int c, size_t n)
375{
376 char *xs;
377
378 if (c == 0)
379 return __builtin_memset(s, 0, n);
380
381 xs = (char *) s;
382 if (n > 0)
383 do {
384 *xs++ = c;
385 } while (--n > 0);
386 return s;
387}
388EXPORT_SYMBOL(memset);
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index d98fe9004a52..0f5536b0c1a1 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \ 5obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \
6 page-states.o gup.o 6 page-states.o gup.o extable.o
7obj-$(CONFIG_CMM) += cmm.o 7obj-$(CONFIG_CMM) += cmm.o
8obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 8obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
9obj-$(CONFIG_DEBUG_SET_MODULE_RONX) += pageattr.o 9obj-$(CONFIG_DEBUG_SET_MODULE_RONX) += pageattr.o
diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
new file mode 100644
index 000000000000..4d1ee88864e8
--- /dev/null
+++ b/arch/s390/mm/extable.c
@@ -0,0 +1,81 @@
1#include <linux/module.h>
2#include <linux/sort.h>
3#include <asm/uaccess.h>
4
5/*
6 * Search one exception table for an entry corresponding to the
7 * given instruction address, and return the address of the entry,
8 * or NULL if none is found.
9 * We use a binary search, and thus we assume that the table is
10 * already sorted.
11 */
12const struct exception_table_entry *
13search_extable(const struct exception_table_entry *first,
14 const struct exception_table_entry *last,
15 unsigned long value)
16{
17 const struct exception_table_entry *mid;
18 unsigned long addr;
19
20 while (first <= last) {
21 mid = ((last - first) >> 1) + first;
22 addr = extable_insn(mid);
23 if (addr < value)
24 first = mid + 1;
25 else if (addr > value)
26 last = mid - 1;
27 else
28 return mid;
29 }
30 return NULL;
31}
32
33/*
34 * The exception table needs to be sorted so that the binary
35 * search that we use to find entries in it works properly.
36 * This is used both for the kernel exception table and for
37 * the exception tables of modules that get loaded.
38 *
39 */
40static int cmp_ex(const void *a, const void *b)
41{
42 const struct exception_table_entry *x = a, *y = b;
43
44 /* This compare is only valid after normalization. */
45 return x->insn - y->insn;
46}
47
48void sort_extable(struct exception_table_entry *start,
49 struct exception_table_entry *finish)
50{
51 struct exception_table_entry *p;
52 int i;
53
54 /* Normalize entries to being relative to the start of the section */
55 for (p = start, i = 0; p < finish; p++, i += 8)
56 p->insn += i;
57 sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
58 /* Denormalize all entries */
59 for (p = start, i = 0; p < finish; p++, i += 8)
60 p->insn -= i;
61}
62
63#ifdef CONFIG_MODULES
64/*
65 * If the exception table is sorted, any entries referring to the module init
66 * will be at the beginning or the end.
67 */
68void trim_init_extable(struct module *m)
69{
70 /* Trim the beginning */
71 while (m->num_exentries &&
72 within_module_init(extable_insn(&m->extable[0]), m)) {
73 m->extable++;
74 m->num_exentries--;
75 }
76 /* Trim the end */
77 while (m->num_exentries &&
78 within_module_init(extable_insn(&m->extable[m->num_exentries-1]), m))
79 m->num_exentries--;
80}
81#endif /* CONFIG_MODULES */
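
The new extable.c only makes sense together with a relative exception-table format: each entry is assumed to store the faulting instruction and its fixup as 32-bit offsets relative to the entry itself, which is why sort_extable() normalizes the insn fields to section-relative values before sorting (sorting moves entries, which would otherwise change every encoded target) and denormalizes them afterwards. A hedged sketch of the accessors; the real extable_insn()/extable_fixup() helpers live in a header and may differ in detail:

/* Assumed 8-byte relative layout, matching the i += 8 stride above. */
struct exception_table_entry {
	int insn;	/* offset of the faulting instruction, relative to &insn */
	int fixup;	/* offset of the fixup code, relative to &fixup */
};

static unsigned long extable_insn_sketch(const struct exception_table_entry *x)
{
	return (unsigned long) &x->insn + x->insn;
}

static unsigned long extable_fixup_sketch(const struct exception_table_entry *x)
{
	return (unsigned long) &x->fixup + x->fixup;
}

The do_no_context() hunk in arch/s390/mm/fault.c below switches from reading fixup->fixup directly to calling extable_fixup() for exactly this reason.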
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 6c013f544146..ac9122ca1152 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -111,7 +111,7 @@ static inline int user_space_fault(unsigned long trans_exc_code)
111 if (trans_exc_code == 2) 111 if (trans_exc_code == 2)
112 /* Access via secondary space, set_fs setting decides */ 112 /* Access via secondary space, set_fs setting decides */
113 return current->thread.mm_segment.ar4; 113 return current->thread.mm_segment.ar4;
114 if (addressing_mode == HOME_SPACE_MODE) 114 if (s390_user_mode == HOME_SPACE_MODE)
115 /* User space if the access has been done via home space. */ 115 /* User space if the access has been done via home space. */
116 return trans_exc_code == 3; 116 return trans_exc_code == 3;
117 /* 117 /*
@@ -163,7 +163,7 @@ static noinline void do_no_context(struct pt_regs *regs)
163 /* Are we prepared to handle this kernel fault? */ 163 /* Are we prepared to handle this kernel fault? */
164 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); 164 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
165 if (fixup) { 165 if (fixup) {
166 regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; 166 regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
167 return; 167 return;
168 } 168 }
169 169
@@ -628,9 +628,8 @@ static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
628 struct thread_struct *thread, *next; 628 struct thread_struct *thread, *next;
629 struct task_struct *tsk; 629 struct task_struct *tsk;
630 630
631 switch (action) { 631 switch (action & ~CPU_TASKS_FROZEN) {
632 case CPU_DEAD: 632 case CPU_DEAD:
633 case CPU_DEAD_FROZEN:
634 spin_lock_irq(&pfault_lock); 633 spin_lock_irq(&pfault_lock);
635 list_for_each_entry_safe(thread, next, &pfault_list, list) { 634 list_for_each_entry_safe(thread, next, &pfault_list, list) {
636 thread->pfault_wait = 0; 635 thread->pfault_wait = 0;
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 65cb06e2af4e..eeaf8023851f 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -154,6 +154,43 @@ static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
154 return 1; 154 return 1;
155} 155}
156 156
157/*
158 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
159 * back to the regular GUP.
160 */
161int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
162 struct page **pages)
163{
164 struct mm_struct *mm = current->mm;
165 unsigned long addr, len, end;
166 unsigned long next, flags;
167 pgd_t *pgdp, pgd;
168 int nr = 0;
169
170 start &= PAGE_MASK;
171 addr = start;
172 len = (unsigned long) nr_pages << PAGE_SHIFT;
173 end = start + len;
174 if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
175 (void __user *)start, len)))
176 return 0;
177
178 local_irq_save(flags);
179 pgdp = pgd_offset(mm, addr);
180 do {
181 pgd = *pgdp;
182 barrier();
183 next = pgd_addr_end(addr, end);
184 if (pgd_none(pgd))
185 break;
186 if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
187 break;
188 } while (pgdp++, addr = next, addr != end);
189 local_irq_restore(flags);
190
191 return nr;
192}
193
157/** 194/**
158 * get_user_pages_fast() - pin user pages in memory 195 * get_user_pages_fast() - pin user pages in memory
159 * @start: starting user address 196 * @start: starting user address
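
A usage sketch for the new helper: code that runs with interrupts disabled (or otherwise cannot sleep) tries __get_user_pages_fast() first and leaves the faulting path to regular get_user_pages_fast() from process context. pin_one_page() is illustrative only and not part of the patch:

#include <linux/errno.h>
#include <linux/mm.h>

static int pin_one_page(unsigned long uaddr, int write, struct page **page)
{
	/* IRQ-safe attempt: walks the page tables, never faults or sleeps. */
	if (__get_user_pages_fast(uaddr, 1, write, page) == 1)
		return 0;
	/* Fallback, process context only: may fault the page in. */
	if (get_user_pages_fast(uaddr, 1, write, page) == 1)
		return 0;
	return -EFAULT;
}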
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 6adbc082618a..81e596c65dee 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -42,7 +42,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
42unsigned long empty_zero_page, zero_page_mask; 42unsigned long empty_zero_page, zero_page_mask;
43EXPORT_SYMBOL(empty_zero_page); 43EXPORT_SYMBOL(empty_zero_page);
44 44
45static unsigned long setup_zero_pages(void) 45static unsigned long __init setup_zero_pages(void)
46{ 46{
47 struct cpuid cpu_id; 47 struct cpuid cpu_id;
48 unsigned int order; 48 unsigned int order;
@@ -212,7 +212,7 @@ void free_initmem(void)
212} 212}
213 213
214#ifdef CONFIG_BLK_DEV_INITRD 214#ifdef CONFIG_BLK_DEV_INITRD
215void free_initrd_mem(unsigned long start, unsigned long end) 215void __init free_initrd_mem(unsigned long start, unsigned long end)
216{ 216{
217 free_init_pages("initrd memory", start, end); 217 free_init_pages("initrd memory", start, end);
218} 218}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 18df31d1f2c9..b402991e43d7 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -609,8 +609,8 @@ static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
609 */ 609 */
610unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr) 610unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
611{ 611{
612 struct page *page; 612 unsigned long *uninitialized_var(table);
613 unsigned long *table; 613 struct page *uninitialized_var(page);
614 unsigned int mask, bit; 614 unsigned int mask, bit;
615 615
616 if (mm_has_pgste(mm)) 616 if (mm_has_pgste(mm))
@@ -796,7 +796,7 @@ int s390_enable_sie(void)
796 struct mm_struct *mm, *old_mm; 796 struct mm_struct *mm, *old_mm;
797 797
798 /* Do we have switched amode? If no, we cannot do sie */ 798 /* Do we have switched amode? If no, we cannot do sie */
799 if (addressing_mode == HOME_SPACE_MODE) 799 if (s390_user_mode == HOME_SPACE_MODE)
800 return -EINVAL; 800 return -EINVAL;
801 801
802 /* Do we have pgstes? if yes, we are done */ 802 /* Do we have pgstes? if yes, we are done */
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 6f896e75ab49..c22abf900c9e 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -107,7 +107,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
107 pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0)); 107 pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
108 pm_dir = pmd_offset(pu_dir, address); 108 pm_dir = pmd_offset(pu_dir, address);
109 109
110#ifdef CONFIG_64BIT 110#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
111 if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) && 111 if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
112 (address + HPAGE_SIZE <= start + size) && 112 (address + HPAGE_SIZE <= start + size) &&
113 (address >= HPAGE_SIZE)) { 113 (address >= HPAGE_SIZE)) {
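
The vmem_add_mem() hunk above stops using 1 MB segment mappings when CONFIG_DEBUG_PAGEALLOC is enabled, presumably because page-granular mapping changes need 4 KB ptes that a segment mapping does not provide. A hedged restatement of the tightened condition (use_segment_mapping() and the explicit 1 MB constant are illustrative; the real code uses HPAGE_MASK/HPAGE_SIZE):

#include <linux/types.h>

#define SEG_SIZE (1UL << 20)	/* 1 MB segment, i.e. HPAGE_SIZE on s390 */

static bool use_segment_mapping(unsigned long address, unsigned long start,
				unsigned long size, bool has_hpage)
{
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
	return has_hpage && !(address & (SEG_SIZE - 1)) &&
	       address + SEG_SIZE <= start + size && address >= SEG_SIZE;
#else
	return false;
#endif
}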
diff --git a/arch/s390/net/Makefile b/arch/s390/net/Makefile
new file mode 100644
index 000000000000..90568c33ddb0
--- /dev/null
+++ b/arch/s390/net/Makefile
@@ -0,0 +1,4 @@
1#
2# Arch-specific network modules
3#
4obj-$(CONFIG_BPF_JIT) += bpf_jit.o bpf_jit_comp.o
diff --git a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S
new file mode 100644
index 000000000000..7e45d13816c1
--- /dev/null
+++ b/arch/s390/net/bpf_jit.S
@@ -0,0 +1,130 @@
1/*
2 * BPF Jit compiler for s390, help functions.
3 *
4 * Copyright IBM Corp. 2012
5 *
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7 */
8#include <linux/linkage.h>
9
10/*
11 * Calling convention:
12 * registers %r2, %r6-%r8, %r10-%r11, %r13, %r15 are call saved
13 * %r2: skb pointer
14 * %r3: offset parameter
15 * %r5: BPF A accumulator
16 * %r8: return address
17 * %r9: save register for skb pointer
18 * %r10: skb->data
19 * %r11: skb->len - skb->data_len (headlen)
20 * %r12: BPF X accumulator
21 *
22 * skb_copy_bits takes 4 parameters:
23 * %r2 = skb pointer
24 * %r3 = offset into skb data
25 * %r4 = length to copy
26 * %r5 = pointer to temp buffer
27 */
28#define SKBDATA %r8
29
30 /* A = *(u32 *) (skb->data+K+X) */
31ENTRY(sk_load_word_ind)
32 ar %r3,%r12 # offset += X
33 bmr %r8 # < 0 -> return with cc
34
35 /* A = *(u32 *) (skb->data+K) */
36ENTRY(sk_load_word)
37 llgfr %r1,%r3 # extend offset
38 ahi %r3,4 # offset + 4
39 clr %r11,%r3 # hlen <= offset + 4 ?
40 jl sk_load_word_slow
41 l %r5,0(%r1,%r10) # get word from skb
42 xr %r1,%r1 # set cc to zero
43 br %r8
44
45sk_load_word_slow:
46 lgr %r9,%r2 # save %r2
47 lhi %r4,4 # 4 bytes
48 la %r5,160(%r15) # pointer to temp buffer
49 brasl %r14,skb_copy_bits # get data from skb
50 l %r5,160(%r15) # load result from temp buffer
51 ltgr %r2,%r2 # set cc to (%r2 != 0)
52 lgr %r2,%r9 # restore %r2
53 br %r8
54
55 /* A = *(u16 *) (skb->data+K+X) */
56ENTRY(sk_load_half_ind)
57 ar %r3,%r12 # offset += X
58 bmr %r8 # < 0 -> return with cc
59
60 /* A = *(u16 *) (skb->data+K) */
61ENTRY(sk_load_half)
62 llgfr %r1,%r3 # extend offset
63 ahi %r3,2 # offset + 2
64 clr %r11,%r3 # hlen <= offset + 2 ?
65 jl sk_load_half_slow
66 llgh %r5,0(%r1,%r10) # get half from skb
67 xr %r1,%r1 # set cc to zero
68 br %r8
69
70sk_load_half_slow:
71 lgr %r9,%r2 # save %r2
72 lhi %r4,2 # 2 bytes
73 la %r5,162(%r15) # pointer to temp buffer
74 brasl %r14,skb_copy_bits # get data from skb
75 xc 160(2,%r15),160(%r15)
76 l %r5,160(%r15) # load result from temp buffer
77 ltgr %r2,%r2 # set cc to (%r2 != 0)
78 lgr %r2,%r9 # restore %r2
79 br %r8
80
81 /* A = *(u8 *) (skb->data+K+X) */
82ENTRY(sk_load_byte_ind)
83 ar %r3,%r12 # offset += X
84 bmr %r8 # < 0 -> return with cc
85
86 /* A = *(u8 *) (skb->data+K) */
87ENTRY(sk_load_byte)
88 llgfr %r1,%r3 # extend offset
89 clr %r11,%r3 # hlen < offset ?
90 jle sk_load_byte_slow
91 lhi %r5,0
92 ic %r5,0(%r1,%r10) # get byte from skb
93 xr %r1,%r1 # set cc to zero
94 br %r8
95
96sk_load_byte_slow:
97 lgr %r9,%r2 # save %r2
98 lhi %r4,1 # 1 byte
99 la %r5,163(%r15) # pointer to temp buffer
100 brasl %r14,skb_copy_bits # get data from skb
101 xc 160(3,%r15),160(%r15)
102 l %r5,160(%r15) # load result from temp buffer
103 ltgr %r2,%r2 # set cc to (%r2 != 0)
104 lgr %r2,%r9 # restore %r2
105 br %r8
106
107 /* A = (*(u8 *)(skb->data+K) & 0xf) << 2 */
108ENTRY(sk_load_byte_msh)
109 llgfr %r1,%r3 # extend offset
110 clr %r11,%r3 # hlen < offset ?
111 jle sk_load_byte_msh_slow
112 lhi %r12,0
113 ic %r12,0(%r1,%r10) # get byte from skb
114 nill %r12,0x0f
115 sll %r12,2
116 xr %r1,%r1 # set cc to zero
117 br %r8
118
119sk_load_byte_msh_slow:
120 lgr %r9,%r2 # save %r2
121 lhi %r4,2 # 2 bytes
122 la %r5,162(%r15) # pointer to temp buffer
123 brasl %r14,skb_copy_bits # get data from skb
124 xc 160(3,%r15),160(%r15)
125 l %r12,160(%r15) # load result from temp buffer
126 nill %r12,0x0f
127 sll %r12,2
128 ltgr %r2,%r2 # set cc to (%r2 != 0)
129 lgr %r2,%r9 # restore %r2
130 br %r8
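
Each ENTRY above implements the same fast/slow split, restated here in C for the word case: if the requested bytes sit in the linear skb head they are loaded directly (s390 is big endian, so the plain l instruction already yields the value BPF expects), otherwise skb_copy_bits() fetches them and a nonzero condition code sends the JITed code to its ret0 label. jit_load_word_sketch() is an illustration, not code from the patch:

#include <linux/skbuff.h>

static int jit_load_word_sketch(struct sk_buff *skb, int offset, u32 *a)
{
	u32 tmp;

	if (offset >= 0 && offset + 4 <= (int) skb_headlen(skb)) {
		*a = *(u32 *) (skb->data + offset);	/* fast path: linear data */
		return 0;
	}
	if (skb_copy_bits(skb, offset, &tmp, 4) < 0)
		return -1;				/* maps to the jnz <ret0> branch */
	*a = tmp;
	return 0;
}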
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
new file mode 100644
index 000000000000..9b355b406afa
--- /dev/null
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -0,0 +1,776 @@
1/*
2 * BPF Jit compiler for s390.
3 *
4 * Copyright IBM Corp. 2012
5 *
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7 */
8#include <linux/moduleloader.h>
9#include <linux/netdevice.h>
10#include <linux/filter.h>
11#include <asm/cacheflush.h>
12#include <asm/processor.h>
13#include <asm/facility.h>
14
15/*
16 * Conventions:
17 * %r2 = skb pointer
18 * %r3 = offset parameter
19 * %r4 = scratch register / length parameter
20 * %r5 = BPF A accumulator
21 * %r8 = return address
22 * %r9 = save register for skb pointer
23 * %r10 = skb->data
24 * %r11 = skb->len - skb->data_len (headlen)
25 * %r12 = BPF X accumulator
26 * %r13 = literal pool pointer
27 * 0(%r15) - 63(%r15) scratch memory array with BPF_MEMWORDS
28 */
29int bpf_jit_enable __read_mostly;
30
31/*
32 * assembly code in arch/s390/net/bpf_jit.S
33 */
34extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
35extern u8 sk_load_word_ind[], sk_load_half_ind[], sk_load_byte_ind[];
36
37struct bpf_jit {
38 unsigned int seen;
39 u8 *start;
40 u8 *prg;
41 u8 *mid;
42 u8 *lit;
43 u8 *end;
44 u8 *base_ip;
45 u8 *ret0_ip;
46 u8 *exit_ip;
47 unsigned int off_load_word;
48 unsigned int off_load_half;
49 unsigned int off_load_byte;
50 unsigned int off_load_bmsh;
51 unsigned int off_load_iword;
52 unsigned int off_load_ihalf;
53 unsigned int off_load_ibyte;
54};
55
56#define BPF_SIZE_MAX 4096 /* Max size for program */
57
58#define SEEN_DATAREF 1 /* might call external helpers */
59#define SEEN_XREG 2 /* %r12 (BPF X accumulator) is used */
60#define SEEN_MEM 4 /* use mem[] for temporary storage */
61#define SEEN_RET0 8 /* pc_ret0 points to a valid return 0 */
62#define SEEN_LITERAL 16 /* code uses literals */
63#define SEEN_LOAD_WORD 32 /* code uses sk_load_word */
64#define SEEN_LOAD_HALF 64 /* code uses sk_load_half */
65#define SEEN_LOAD_BYTE 128 /* code uses sk_load_byte */
66#define SEEN_LOAD_BMSH 256 /* code uses sk_load_byte_msh */
67#define SEEN_LOAD_IWORD 512 /* code uses sk_load_word_ind */
68#define SEEN_LOAD_IHALF 1024 /* code uses sk_load_half_ind */
69#define SEEN_LOAD_IBYTE 2048 /* code uses sk_load_byte_ind */
70
71#define EMIT2(op) \
72({ \
73 if (jit->prg + 2 <= jit->mid) \
74 *(u16 *) jit->prg = op; \
75 jit->prg += 2; \
76})
77
78#define EMIT4(op) \
79({ \
80 if (jit->prg + 4 <= jit->mid) \
81 *(u32 *) jit->prg = op; \
82 jit->prg += 4; \
83})
84
85#define EMIT4_DISP(op, disp) \
86({ \
87 unsigned int __disp = (disp) & 0xfff; \
88 EMIT4(op | __disp); \
89})
90
91#define EMIT4_IMM(op, imm) \
92({ \
93 unsigned int __imm = (imm) & 0xffff; \
94 EMIT4(op | __imm); \
95})
96
97#define EMIT4_PCREL(op, pcrel) \
98({ \
99 long __pcrel = ((pcrel) >> 1) & 0xffff; \
100 EMIT4(op | __pcrel); \
101})
102
103#define EMIT6(op1, op2) \
104({ \
105 if (jit->prg + 6 <= jit->mid) { \
106 *(u32 *) jit->prg = op1; \
107 *(u16 *) (jit->prg + 4) = op2; \
108 } \
109 jit->prg += 6; \
110})
111
112#define EMIT6_DISP(op1, op2, disp) \
113({ \
114 unsigned int __disp = (disp) & 0xfff; \
115 EMIT6(op1 | __disp, op2); \
116})
117
118#define EMIT6_IMM(op, imm) \
119({ \
120 unsigned int __imm = (imm); \
121 EMIT6(op | (__imm >> 16), __imm & 0xffff); \
122})
123
124#define EMIT_CONST(val) \
125({ \
126 unsigned int ret; \
127 ret = (unsigned int) (jit->lit - jit->base_ip); \
128 jit->seen |= SEEN_LITERAL; \
129 if (jit->lit + 4 <= jit->end) \
130 *(u32 *) jit->lit = val; \
131 jit->lit += 4; \
132 ret; \
133})
134
135#define EMIT_FN_CONST(bit, fn) \
136({ \
137 unsigned int ret; \
138 ret = (unsigned int) (jit->lit - jit->base_ip); \
139 if (jit->seen & bit) { \
140 jit->seen |= SEEN_LITERAL; \
141 if (jit->lit + 8 <= jit->end) \
142 *(void **) jit->lit = fn; \
143 jit->lit += 8; \
144 } \
145 ret; \
146})
147
148static void bpf_jit_prologue(struct bpf_jit *jit)
149{
150 /* Save registers and create stack frame if necessary */
151 if (jit->seen & SEEN_DATAREF) {
152 /* stmg %r8,%r15,88(%r15) */
153 EMIT6(0xeb8ff058, 0x0024);
154 /* lgr %r14,%r15 */
155 EMIT4(0xb90400ef);
156 /* ahi %r15,<offset> */
157 EMIT4_IMM(0xa7fa0000, (jit->seen & SEEN_MEM) ? -112 : -80);
158 /* stg %r14,152(%r15) */
159 EMIT6(0xe3e0f098, 0x0024);
160 } else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
161 /* stmg %r12,%r13,120(%r15) */
162 EMIT6(0xebcdf078, 0x0024);
163 else if (jit->seen & SEEN_XREG)
164 /* stg %r12,120(%r15) */
165 EMIT6(0xe3c0f078, 0x0024);
166 else if (jit->seen & SEEN_LITERAL)
167 /* stg %r13,128(%r15) */
168 EMIT6(0xe3d0f080, 0x0024);
169
170 /* Setup literal pool */
171 if (jit->seen & SEEN_LITERAL) {
172 /* basr %r13,0 */
173 EMIT2(0x0dd0);
174 jit->base_ip = jit->prg;
175 }
176 jit->off_load_word = EMIT_FN_CONST(SEEN_LOAD_WORD, sk_load_word);
177 jit->off_load_half = EMIT_FN_CONST(SEEN_LOAD_HALF, sk_load_half);
178 jit->off_load_byte = EMIT_FN_CONST(SEEN_LOAD_BYTE, sk_load_byte);
179 jit->off_load_bmsh = EMIT_FN_CONST(SEEN_LOAD_BMSH, sk_load_byte_msh);
180 jit->off_load_iword = EMIT_FN_CONST(SEEN_LOAD_IWORD, sk_load_word_ind);
181 jit->off_load_ihalf = EMIT_FN_CONST(SEEN_LOAD_IHALF, sk_load_half_ind);
182 jit->off_load_ibyte = EMIT_FN_CONST(SEEN_LOAD_IBYTE, sk_load_byte_ind);
183
184 /* Filter needs to access skb data */
185 if (jit->seen & SEEN_DATAREF) {
186 /* l %r11,<len>(%r2) */
187 EMIT4_DISP(0x58b02000, offsetof(struct sk_buff, len));
188 /* s %r11,<data_len>(%r2) */
189 EMIT4_DISP(0x5bb02000, offsetof(struct sk_buff, data_len));
190 /* lg %r10,<data>(%r2) */
191 EMIT6_DISP(0xe3a02000, 0x0004,
192 offsetof(struct sk_buff, data));
193 }
194}
195
196static void bpf_jit_epilogue(struct bpf_jit *jit)
197{
198 /* Return 0 */
199 if (jit->seen & SEEN_RET0) {
200 jit->ret0_ip = jit->prg;
201 /* lghi %r2,0 */
202 EMIT4(0xa7290000);
203 }
204 jit->exit_ip = jit->prg;
205 /* Restore registers */
206 if (jit->seen & SEEN_DATAREF)
207 /* lmg %r8,%r15,<offset>(%r15) */
208 EMIT6_DISP(0xeb8ff000, 0x0004,
209 (jit->seen & SEEN_MEM) ? 200 : 168);
210 else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
211 /* lmg %r12,%r13,120(%r15) */
212 EMIT6(0xebcdf078, 0x0004);
213 else if (jit->seen & SEEN_XREG)
214 /* lg %r12,120(%r15) */
215 EMIT6(0xe3c0f078, 0x0004);
216 else if (jit->seen & SEEN_LITERAL)
217 /* lg %r13,128(%r15) */
218 EMIT6(0xe3d0f080, 0x0004);
219 /* br %r14 */
220 EMIT2(0x07fe);
221}
222
223/*
224 * make sure we don't leak kernel information to user space
225 */
226static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
227{
228 /* Clear temporary memory if (seen & SEEN_MEM) */
229 if (jit->seen & SEEN_MEM)
230 /* xc 0(64,%r15),0(%r15) */
231 EMIT6(0xd73ff000, 0xf000);
232 /* Clear X if (seen & SEEN_XREG) */
233 if (jit->seen & SEEN_XREG)
234 /* lhi %r12,0 */
235 EMIT4(0xa7c80000);
236 /* Clear A if the first register does not set it. */
237 switch (filter[0].code) {
238 case BPF_S_LD_W_ABS:
239 case BPF_S_LD_H_ABS:
240 case BPF_S_LD_B_ABS:
241 case BPF_S_LD_W_LEN:
242 case BPF_S_LD_W_IND:
243 case BPF_S_LD_H_IND:
244 case BPF_S_LD_B_IND:
245 case BPF_S_LDX_B_MSH:
246 case BPF_S_LD_IMM:
247 case BPF_S_LD_MEM:
248 case BPF_S_MISC_TXA:
249 case BPF_S_ANC_PROTOCOL:
250 case BPF_S_ANC_PKTTYPE:
251 case BPF_S_ANC_IFINDEX:
252 case BPF_S_ANC_MARK:
253 case BPF_S_ANC_QUEUE:
254 case BPF_S_ANC_HATYPE:
255 case BPF_S_ANC_RXHASH:
256 case BPF_S_ANC_CPU:
257 case BPF_S_RET_K:
258 /* first instruction sets A register */
259 break;
260 default: /* A = 0 */
261 /* lhi %r5,0 */
262 EMIT4(0xa7580000);
263 }
264}
265
266static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
267 unsigned int *addrs, int i, int last)
268{
269 unsigned int K;
270 int offset;
271 unsigned int mask;
272
273 K = filter->k;
274 switch (filter->code) {
275 case BPF_S_ALU_ADD_X: /* A += X */
276 jit->seen |= SEEN_XREG;
277 /* ar %r5,%r12 */
278 EMIT2(0x1a5c);
279 break;
280 case BPF_S_ALU_ADD_K: /* A += K */
281 if (!K)
282 break;
283 if (K <= 16383)
284 /* ahi %r5,<K> */
285 EMIT4_IMM(0xa75a0000, K);
286 else if (test_facility(21))
287 /* alfi %r5,<K> */
288 EMIT6_IMM(0xc25b0000, K);
289 else
290 /* a %r5,<d(K)>(%r13) */
291 EMIT4_DISP(0x5a50d000, EMIT_CONST(K));
292 break;
293 case BPF_S_ALU_SUB_X: /* A -= X */
294 jit->seen |= SEEN_XREG;
295 /* sr %r5,%r12 */
296 EMIT2(0x1b5c);
297 break;
298 case BPF_S_ALU_SUB_K: /* A -= K */
299 if (!K)
300 break;
301 if (K <= 16384)
302 /* ahi %r5,-K */
303 EMIT4_IMM(0xa75a0000, -K);
304 else if (test_facility(21))
305 /* alfi %r5,-K */
306 EMIT6_IMM(0xc25b0000, -K);
307 else
308 /* s %r5,<d(K)>(%r13) */
309 EMIT4_DISP(0x5b50d000, EMIT_CONST(K));
310 break;
311 case BPF_S_ALU_MUL_X: /* A *= X */
312 jit->seen |= SEEN_XREG;
313 /* msr %r5,%r12 */
314 EMIT4(0xb252005c);
315 break;
316 case BPF_S_ALU_MUL_K: /* A *= K */
317 if (K <= 16383)
318 /* mhi %r5,K */
319 EMIT4_IMM(0xa75c0000, K);
320 else if (test_facility(34))
321 /* msfi %r5,<K> */
322 EMIT6_IMM(0xc2510000, K);
323 else
324 /* ms %r5,<d(K)>(%r13) */
325 EMIT4_DISP(0x7150d000, EMIT_CONST(K));
326 break;
327 case BPF_S_ALU_DIV_X: /* A /= X */
328 jit->seen |= SEEN_XREG | SEEN_RET0;
329 /* ltr %r12,%r12 */
330 EMIT2(0x12cc);
331 /* jz <ret0> */
332 EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
333 /* lhi %r4,0 */
334 EMIT4(0xa7480000);
335 /* dr %r4,%r12 */
336 EMIT2(0x1d4c);
337 break;
338 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K) */
339 /* m %r4,<d(K)>(%r13) */
340 EMIT4_DISP(0x5c40d000, EMIT_CONST(K));
341 /* lr %r5,%r4 */
342 EMIT2(0x1854);
343 break;
344 case BPF_S_ALU_AND_X: /* A &= X */
345 jit->seen |= SEEN_XREG;
346 /* nr %r5,%r12 */
347 EMIT2(0x145c);
348 break;
349 case BPF_S_ALU_AND_K: /* A &= K */
350 if (test_facility(21))
351 /* nilf %r5,<K> */
352 EMIT6_IMM(0xc05b0000, K);
353 else
354 /* n %r5,<d(K)>(%r13) */
355 EMIT4_DISP(0x5450d000, EMIT_CONST(K));
356 break;
357 case BPF_S_ALU_OR_X: /* A |= X */
358 jit->seen |= SEEN_XREG;
359 /* or %r5,%r12 */
360 EMIT2(0x165c);
361 break;
362 case BPF_S_ALU_OR_K: /* A |= K */
363 if (test_facility(21))
364 /* oilf %r5,<K> */
365 EMIT6_IMM(0xc05d0000, K);
366 else
367 /* o %r5,<d(K)>(%r13) */
368 EMIT4_DISP(0x5650d000, EMIT_CONST(K));
369 break;
370 case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
371 jit->seen |= SEEN_XREG;
372 /* xr %r5,%r12 */
373 EMIT2(0x175c);
374 break;
375 case BPF_S_ALU_LSH_X: /* A <<= X; */
376 jit->seen |= SEEN_XREG;
377 /* sll %r5,0(%r12) */
378 EMIT4(0x8950c000);
379 break;
380 case BPF_S_ALU_LSH_K: /* A <<= K */
381 if (K == 0)
382 break;
383 /* sll %r5,K */
384 EMIT4_DISP(0x89500000, K);
385 break;
386 case BPF_S_ALU_RSH_X: /* A >>= X; */
387 jit->seen |= SEEN_XREG;
388 /* srl %r5,0(%r12) */
389 EMIT4(0x8850c000);
390 break;
391 case BPF_S_ALU_RSH_K: /* A >>= K; */
392 if (K == 0)
393 break;
394 /* srl %r5,K */
395 EMIT4_DISP(0x88500000, K);
396 break;
397 case BPF_S_ALU_NEG: /* A = -A */
398 /* lnr %r5,%r5 */
399 EMIT2(0x1155);
400 break;
401 case BPF_S_JMP_JA: /* ip += K */
402 offset = addrs[i + K] + jit->start - jit->prg;
403 EMIT4_PCREL(0xa7f40000, offset);
404 break;
405 case BPF_S_JMP_JGT_K: /* ip += (A > K) ? jt : jf */
406 mask = 0x200000; /* jh */
407 goto kbranch;
408 case BPF_S_JMP_JGE_K: /* ip += (A >= K) ? jt : jf */
409 mask = 0xa00000; /* jhe */
410 goto kbranch;
411 case BPF_S_JMP_JEQ_K: /* ip += (A == K) ? jt : jf */
412 mask = 0x800000; /* je */
413kbranch: /* Emit compare if the branch targets are different */
414 if (filter->jt != filter->jf) {
415 if (K <= 16383)
416 /* chi %r5,<K> */
417 EMIT4_IMM(0xa75e0000, K);
418 else if (test_facility(21))
419 /* clfi %r5,<K> */
420 EMIT6_IMM(0xc25f0000, K);
421 else
422 /* c %r5,<d(K)>(%r13) */
423 EMIT4_DISP(0x5950d000, EMIT_CONST(K));
424 }
425branch: if (filter->jt == filter->jf) {
426 if (filter->jt == 0)
427 break;
428 /* j <jt> */
429 offset = addrs[i + filter->jt] + jit->start - jit->prg;
430 EMIT4_PCREL(0xa7f40000, offset);
431 break;
432 }
433 if (filter->jt != 0) {
434 /* brc <mask>,<jt> */
435 offset = addrs[i + filter->jt] + jit->start - jit->prg;
436 EMIT4_PCREL(0xa7040000 | mask, offset);
437 }
438 if (filter->jf != 0) {
439 /* brc <mask^15>,<jf> */
440 offset = addrs[i + filter->jf] + jit->start - jit->prg;
441 EMIT4_PCREL(0xa7040000 | (mask ^ 0xf00000), offset);
442 }
443 break;
444 case BPF_S_JMP_JSET_K: /* ip += (A & K) ? jt : jf */
445 mask = 0x700000; /* jnz */
446 /* Emit test if the branch targets are different */
447 if (filter->jt != filter->jf) {
448 if (K > 65535) {
449 /* lr %r4,%r5 */
450 EMIT2(0x1845);
451 /* n %r4,<d(K)>(%r13) */
452 EMIT4_DISP(0x5440d000, EMIT_CONST(K));
453 } else
454 /* tmll %r5,K */
455 EMIT4_IMM(0xa7510000, K);
456 }
457 goto branch;
458 case BPF_S_JMP_JGT_X: /* ip += (A > X) ? jt : jf */
459 mask = 0x200000; /* jh */
460 goto xbranch;
461 case BPF_S_JMP_JGE_X: /* ip += (A >= X) ? jt : jf */
462 mask = 0xa00000; /* jhe */
463 goto xbranch;
464 case BPF_S_JMP_JEQ_X: /* ip += (A == X) ? jt : jf */
465 mask = 0x800000; /* je */
466xbranch: /* Emit compare if the branch targets are different */
467 if (filter->jt != filter->jf) {
468 jit->seen |= SEEN_XREG;
469 /* cr %r5,%r12 */
470 EMIT2(0x195c);
471 }
472 goto branch;
473 case BPF_S_JMP_JSET_X: /* ip += (A & X) ? jt : jf */
474 mask = 0x700000; /* jnz */
475 /* Emit test if the branch targets are different */
476 if (filter->jt != filter->jf) {
477 jit->seen |= SEEN_XREG;
478 /* lr %r4,%r5 */
479 EMIT2(0x1845);
480 /* nr %r4,%r12 */
481 EMIT2(0x144c);
482 }
483 goto branch;
484 case BPF_S_LD_W_ABS: /* A = *(u32 *) (skb->data+K) */
485 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_WORD;
486 offset = jit->off_load_word;
487 goto load_abs;
488 case BPF_S_LD_H_ABS: /* A = *(u16 *) (skb->data+K) */
489 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_HALF;
490 offset = jit->off_load_half;
491 goto load_abs;
492 case BPF_S_LD_B_ABS: /* A = *(u8 *) (skb->data+K) */
493 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_BYTE;
494 offset = jit->off_load_byte;
495load_abs: if ((int) K < 0)
496 goto out;
497call_fn: /* lg %r1,<d(function)>(%r13) */
498 EMIT6_DISP(0xe310d000, 0x0004, offset);
499 /* l %r3,<d(K)>(%r13) */
500 EMIT4_DISP(0x5830d000, EMIT_CONST(K));
501 /* basr %r8,%r1 */
502 EMIT2(0x0d81);
503 /* jnz <ret0> */
504 EMIT4_PCREL(0xa7740000, (jit->ret0_ip - jit->prg));
505 break;
506 case BPF_S_LD_W_IND: /* A = *(u32 *) (skb->data+K+X) */
507 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IWORD;
508 offset = jit->off_load_iword;
509 goto call_fn;
510 case BPF_S_LD_H_IND: /* A = *(u16 *) (skb->data+K+X) */
511 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IHALF;
512 offset = jit->off_load_ihalf;
513 goto call_fn;
514 case BPF_S_LD_B_IND: /* A = *(u8 *) (skb->data+K+X) */
515 jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IBYTE;
516 offset = jit->off_load_ibyte;
517 goto call_fn;
518 case BPF_S_LDX_B_MSH:
519 /* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */
520 jit->seen |= SEEN_RET0;
521 if ((int) K < 0) {
522 /* j <ret0> */
523 EMIT4_PCREL(0xa7f40000, (jit->ret0_ip - jit->prg));
524 break;
525 }
526 jit->seen |= SEEN_DATAREF | SEEN_LOAD_BMSH;
527 offset = jit->off_load_bmsh;
528 goto call_fn;
529 case BPF_S_LD_W_LEN: /* A = skb->len; */
530 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
531 /* l %r5,<d(len)>(%r2) */
532 EMIT4_DISP(0x58502000, offsetof(struct sk_buff, len));
533 break;
534 case BPF_S_LDX_W_LEN: /* X = skb->len; */
535 jit->seen |= SEEN_XREG;
536 /* l %r12,<d(len)>(%r2) */
537 EMIT4_DISP(0x58c02000, offsetof(struct sk_buff, len));
538 break;
539 case BPF_S_LD_IMM: /* A = K */
540 if (K <= 16383)
541 /* lhi %r5,K */
542 EMIT4_IMM(0xa7580000, K);
543 else if (test_facility(21))
544 /* llilf %r5,<K> */
545 EMIT6_IMM(0xc05f0000, K);
546 else
547 /* l %r5,<d(K)>(%r13) */
548 EMIT4_DISP(0x5850d000, EMIT_CONST(K));
549 break;
550 case BPF_S_LDX_IMM: /* X = K */
551 jit->seen |= SEEN_XREG;
552 if (K <= 16383)
553 /* lhi %r12,<K> */
554 EMIT4_IMM(0xa7c80000, K);
555 else if (test_facility(21))
556 /* llilf %r12,<K> */
557 EMIT6_IMM(0xc0cf0000, K);
558 else
559 /* l %r12,<d(K)>(%r13) */
560 EMIT4_DISP(0x58c0d000, EMIT_CONST(K));
561 break;
562 case BPF_S_LD_MEM: /* A = mem[K] */
563 jit->seen |= SEEN_MEM;
564 /* l %r5,<K>(%r15) */
565 EMIT4_DISP(0x5850f000,
566 (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
567 break;
568 case BPF_S_LDX_MEM: /* X = mem[K] */
569 jit->seen |= SEEN_XREG | SEEN_MEM;
570 /* l %r12,<K>(%r15) */
571 EMIT4_DISP(0x58c0f000,
572 (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
573 break;
574 case BPF_S_MISC_TAX: /* X = A */
575 jit->seen |= SEEN_XREG;
576 /* lr %r12,%r5 */
577 EMIT2(0x18c5);
578 break;
579 case BPF_S_MISC_TXA: /* A = X */
580 jit->seen |= SEEN_XREG;
581 /* lr %r5,%r12 */
582 EMIT2(0x185c);
583 break;
584 case BPF_S_RET_K:
585 if (K == 0) {
586 jit->seen |= SEEN_RET0;
587 if (last)
588 break;
589 /* j <ret0> */
590 EMIT4_PCREL(0xa7f40000, jit->ret0_ip - jit->prg);
591 } else {
592 if (K <= 16383)
593 /* lghi %r2,K */
594 EMIT4_IMM(0xa7290000, K);
595 else
596 /* llgf %r2,<K>(%r13) */
597 EMIT6_DISP(0xe320d000, 0x0016, EMIT_CONST(K));
598 /* j <exit> */
599 if (last && !(jit->seen & SEEN_RET0))
600 break;
601 EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
602 }
603 break;
604 case BPF_S_RET_A:
605 /* llgfr %r2,%r5 */
606 EMIT4(0xb9160025);
607 /* j <exit> */
608 EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
609 break;
610 case BPF_S_ST: /* mem[K] = A */
611 jit->seen |= SEEN_MEM;
612 /* st %r5,<K>(%r15) */
613 EMIT4_DISP(0x5050f000,
614 (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
615 break;
616 case BPF_S_STX: /* mem[K] = X */
617 jit->seen |= SEEN_XREG | SEEN_MEM;
618 /* st %r12,<K>(%r15) */
619 EMIT4_DISP(0x50c0f000,
620 (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4);
621 break;
622 case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
623 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
624 /* lhi %r5,0 */
625 EMIT4(0xa7580000);
626 /* icm %r5,3,<d(protocol)>(%r2) */
627 EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, protocol));
628 break;
629 case BPF_S_ANC_IFINDEX: /* if (!skb->dev) return 0;
630 * A = skb->dev->ifindex */
631 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
632 jit->seen |= SEEN_RET0;
633 /* lg %r1,<d(dev)>(%r2) */
634 EMIT6_DISP(0xe3102000, 0x0004, offsetof(struct sk_buff, dev));
635 /* ltgr %r1,%r1 */
636 EMIT4(0xb9020011);
637 /* jz <ret0> */
638 EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
639 /* l %r5,<d(ifindex)>(%r1) */
640 EMIT4_DISP(0x58501000, offsetof(struct net_device, ifindex));
641 break;
642 case BPF_S_ANC_MARK: /* A = skb->mark */
643 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
644 /* l %r5,<d(mark)>(%r2) */
645 EMIT4_DISP(0x58502000, offsetof(struct sk_buff, mark));
646 break;
647 case BPF_S_ANC_QUEUE: /* A = skb->queue_mapping */
648 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
649 /* lhi %r5,0 */
650 EMIT4(0xa7580000);
651 /* icm %r5,3,<d(queue_mapping)>(%r2) */
652 EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, queue_mapping));
653 break;
654 case BPF_S_ANC_HATYPE: /* if (!skb->dev) return 0;
655 * A = skb->dev->type */
656 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
657 jit->seen |= SEEN_RET0;
658 /* lg %r1,<d(dev)>(%r2) */
659 EMIT6_DISP(0xe3102000, 0x0004, offsetof(struct sk_buff, dev));
660 /* ltgr %r1,%r1 */
661 EMIT4(0xb9020011);
662 /* jz <ret0> */
663 EMIT4_PCREL(0xa7840000, jit->ret0_ip - jit->prg);
664 /* lhi %r5,0 */
665 EMIT4(0xa7580000);
666 /* icm %r5,3,<d(type)>(%r1) */
667 EMIT4_DISP(0xbf531000, offsetof(struct net_device, type));
668 break;
669 case BPF_S_ANC_RXHASH: /* A = skb->rxhash */
670 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
671 /* l %r5,<d(rxhash)>(%r2) */
672 EMIT4_DISP(0x58502000, offsetof(struct sk_buff, rxhash));
673 break;
674 case BPF_S_ANC_CPU: /* A = smp_processor_id() */
675#ifdef CONFIG_SMP
676 /* l %r5,<d(cpu_nr)> */
677 EMIT4_DISP(0x58500000, offsetof(struct _lowcore, cpu_nr));
678#else
679 /* lhi %r5,0 */
680 EMIT4(0xa7580000);
681#endif
682 break;
683 default: /* too complex, give up */
684 goto out;
685 }
686 addrs[i] = jit->prg - jit->start;
687 return 0;
688out:
689 return -1;
690}
691
692void bpf_jit_compile(struct sk_filter *fp)
693{
694 unsigned long size, prg_len, lit_len;
695 struct bpf_jit jit, cjit;
696 unsigned int *addrs;
697 int pass, i;
698
699 if (!bpf_jit_enable)
700 return;
701 addrs = kmalloc(fp->len * sizeof(*addrs), GFP_KERNEL);
702 if (addrs == NULL)
703 return;
704 memset(addrs, 0, fp->len * sizeof(*addrs));
705 memset(&jit, 0, sizeof(cjit));
706 memset(&cjit, 0, sizeof(cjit));
707
708 for (pass = 0; pass < 10; pass++) {
709 jit.prg = jit.start;
710 jit.lit = jit.mid;
711
712 bpf_jit_prologue(&jit);
713 bpf_jit_noleaks(&jit, fp->insns);
714 for (i = 0; i < fp->len; i++) {
715 if (bpf_jit_insn(&jit, fp->insns + i, addrs, i,
716 i == fp->len - 1))
717 goto out;
718 }
719 bpf_jit_epilogue(&jit);
720 if (jit.start) {
721 WARN_ON(jit.prg > cjit.prg || jit.lit > cjit.lit);
722 if (memcmp(&jit, &cjit, sizeof(jit)) == 0)
723 break;
724 } else if (jit.prg == cjit.prg && jit.lit == cjit.lit) {
725 prg_len = jit.prg - jit.start;
726 lit_len = jit.lit - jit.mid;
727 size = max_t(unsigned long, prg_len + lit_len,
728 sizeof(struct work_struct));
729 if (size >= BPF_SIZE_MAX)
730 goto out;
731 jit.start = module_alloc(size);
732 if (!jit.start)
733 goto out;
734 jit.prg = jit.mid = jit.start + prg_len;
735 jit.lit = jit.end = jit.start + prg_len + lit_len;
736 jit.base_ip += (unsigned long) jit.start;
737 jit.exit_ip += (unsigned long) jit.start;
738 jit.ret0_ip += (unsigned long) jit.start;
739 }
740 cjit = jit;
741 }
742 if (bpf_jit_enable > 1) {
743 pr_err("flen=%d proglen=%lu pass=%d image=%p\n",
744 fp->len, jit.end - jit.start, pass, jit.start);
745 if (jit.start) {
746 printk(KERN_ERR "JIT code:\n");
747 print_fn_code(jit.start, jit.mid - jit.start);
748 print_hex_dump(KERN_ERR, "JIT literals:\n",
749 DUMP_PREFIX_ADDRESS, 16, 1,
750 jit.mid, jit.end - jit.mid, false);
751 }
752 }
753 if (jit.start)
754 fp->bpf_func = (void *) jit.start;
755out:
756 kfree(addrs);
757}
758
759static void jit_free_defer(struct work_struct *arg)
760{
761 module_free(NULL, arg);
762}
763
764/* run from softirq, we must use a work_struct to call
765 * module_free() from process context
766 */
767void bpf_jit_free(struct sk_filter *fp)
768{
769 struct work_struct *work;
770
771 if (fp->bpf_func == sk_run_filter)
772 return;
773 work = (struct work_struct *)fp->bpf_func;
774 INIT_WORK(work, jit_free_defer);
775 schedule_work(work);
776}
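
The EMIT* macros and the pass loop in bpf_jit_compile() implement a measure-then-emit scheme: while jit.start is still NULL the bounds check in every macro fails, so nothing is stored and jit.prg/jit.lit only accumulate lengths; once two consecutive passes agree on the sizes, module_alloc() provides a real buffer and the very same code paths write the final instructions and literals. A hedged restatement of one emitter as a function (names are illustrative):

#include <linux/types.h>

struct jit_buf {
	u8 *start;	/* NULL during the sizing passes */
	u8 *prg;	/* next instruction byte, or running length while sizing */
	u8 *mid;	/* end of the instruction area / start of the literal pool */
};

/* Equivalent of EMIT2(): store only if the bytes fit, always advance. */
static void emit2_sketch(struct jit_buf *jit, u16 op)
{
	if (jit->prg + 2 <= jit->mid)
		*(u16 *) jit->prg = op;
	jit->prg += 2;
}

Branch offsets shrink as the layout stabilizes, so the loop runs up to ten passes and stops as soon as a pass ends in exactly the same state as the previous one (the memcmp() against cjit).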