aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-11-04 14:31:31 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-11-04 14:31:31 -0500
commite627078a0cbdc0c391efeb5a2c4eb287328fd633 (patch)
tree9cdabfc9c661ea2ac8801f4611e9541a6411706a
parent14c79092909a52b6fd6394b6ad5e7756c4f9565e (diff)
parentb38feccd663b55ab07116208b68e1ffc7c3c7e78 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky: "There is only one new feature in this pull for the 4.4 merge window, most of it is small enhancements, cleanup and bug fixes: - Add the s390 backend for the software dirty bit tracking. This adds two new pgtable functions pte_clear_soft_dirty and pmd_clear_soft_dirty which is why there is a hit to arch/x86/include/asm/pgtable.h in this pull request. - A series of cleanup patches for the AP bus, this includes the removal of the support for two outdated crypto cards (PCICC and PCICA). - The irq handling / signaling on buffer full in the runtime instrumentation code is dropped. - Some micro optimizations: remove unnecessary memory barriers for a couple of functions: [smp_]rmb, [smp_]wmb, atomics, bitops, and for spin_unlock. Use the builtin bswap if available and make test_and_set_bit_lock more cache friendly. - Statistics and a tracepoint for the diagnose calls to the hypervisor. - The CPU measurement facility support to sample KVM guests is improved. - The vector instructions are now always enabled for user space processes if the hardware has the vector facility. This simplifies the FPU handling code. The fpu-internal.h header is split into fpu internals, api and types just like x86. - Cleanup and improvements for the common I/O layer. - Rework udelay to solve a problem with kprobe. 
udelay has busy loop semantics but still uses an idle processor state for the wait" * 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (66 commits) s390: remove runtime instrumentation interrupts s390/cio: de-duplicate subchannel validation s390/css: unneeded initialization in for_each_subchannel s390/Kconfig: use builtin bswap s390/dasd: fix disconnected device with valid path mask s390/dasd: fix invalid PAV assignment after suspend/resume s390/dasd: fix double free in dasd_eckd_read_conf s390/kernel: fix ptrace peek/poke for floating point registers s390/cio: move ccw_device_stlck functions s390/cio: move ccw_device_call_handler s390/topology: reduce per_cpu() invocations s390/nmi: reduce size of percpu variable s390/nmi: fix terminology s390/nmi: remove casts s390/nmi: remove pointless error strings s390: don't store registers on disabled wait anymore s390: get rid of __set_psw_mask() s390/fpu: split fpu-internal.h into fpu internals, api, and type headers s390/dasd: fix list_del corruption after lcu changes s390/spinlock: remove unneeded serializations at unlock ...
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/hypfs/hypfs_diag.c10
-rw-r--r--arch/s390/hypfs/hypfs_diag0c.c2
-rw-r--r--arch/s390/hypfs/hypfs_sprp.c9
-rw-r--r--arch/s390/hypfs/hypfs_vm.c2
-rw-r--r--arch/s390/include/asm/appldata.h2
-rw-r--r--arch/s390/include/asm/atomic.h2
-rw-r--r--arch/s390/include/asm/barrier.h8
-rw-r--r--arch/s390/include/asm/bitops.h55
-rw-r--r--arch/s390/include/asm/cio.h10
-rw-r--r--arch/s390/include/asm/cmb.h1
-rw-r--r--arch/s390/include/asm/cmpxchg.h30
-rw-r--r--arch/s390/include/asm/cpu_mf.h5
-rw-r--r--arch/s390/include/asm/ctl_reg.h2
-rw-r--r--arch/s390/include/asm/diag.h29
-rw-r--r--arch/s390/include/asm/etr.h10
-rw-r--r--arch/s390/include/asm/fpu/api.h30
-rw-r--r--arch/s390/include/asm/fpu/internal.h (renamed from arch/s390/include/asm/fpu-internal.h)51
-rw-r--r--arch/s390/include/asm/fpu/types.h25
-rw-r--r--arch/s390/include/asm/idle.h2
-rw-r--r--arch/s390/include/asm/irq.h14
-rw-r--r--arch/s390/include/asm/kvm_host.h2
-rw-r--r--arch/s390/include/asm/kvm_para.h67
-rw-r--r--arch/s390/include/asm/lowcore.h11
-rw-r--r--arch/s390/include/asm/nmi.h98
-rw-r--r--arch/s390/include/asm/pgtable.h66
-rw-r--r--arch/s390/include/asm/processor.h99
-rw-r--r--arch/s390/include/asm/ptrace.h11
-rw-r--r--arch/s390/include/asm/setup.h50
-rw-r--r--arch/s390/include/asm/spinlock.h3
-rw-r--r--arch/s390/include/asm/switch_to.h2
-rw-r--r--arch/s390/include/asm/thread_info.h22
-rw-r--r--arch/s390/include/asm/trace/diag.h43
-rw-r--r--arch/s390/kernel/Makefile2
-rw-r--r--arch/s390/kernel/asm-offsets.c290
-rw-r--r--arch/s390/kernel/compat_signal.c7
-rw-r--r--arch/s390/kernel/cpcmd.c2
-rw-r--r--arch/s390/kernel/crash_dump.c16
-rw-r--r--arch/s390/kernel/diag.c134
-rw-r--r--arch/s390/kernel/early.c15
-rw-r--r--arch/s390/kernel/entry.S230
-rw-r--r--arch/s390/kernel/entry.h3
-rw-r--r--arch/s390/kernel/head64.S7
-rw-r--r--arch/s390/kernel/ipl.c9
-rw-r--r--arch/s390/kernel/irq.c1
-rw-r--r--arch/s390/kernel/nmi.c120
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c10
-rw-r--r--arch/s390/kernel/process.c37
-rw-r--r--arch/s390/kernel/processor.c5
-rw-r--r--arch/s390/kernel/ptrace.c52
-rw-r--r--arch/s390/kernel/runtime_instr.c64
-rw-r--r--arch/s390/kernel/s390_ksyms.c3
-rw-r--r--arch/s390/kernel/signal.c7
-rw-r--r--arch/s390/kernel/smp.c10
-rw-r--r--arch/s390/kernel/time.c31
-rw-r--r--arch/s390/kernel/topology.c28
-rw-r--r--arch/s390/kernel/trace.c29
-rw-r--r--arch/s390/kernel/traps.c41
-rw-r--r--arch/s390/kernel/vdso.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c6
-rw-r--r--arch/s390/lib/delay.c30
-rw-r--r--arch/s390/lib/find.c4
-rw-r--r--arch/s390/lib/spinlock.c4
-rw-r--r--arch/s390/mm/extmem.c3
-rw-r--r--arch/s390/mm/fault.c7
-rw-r--r--arch/s390/mm/hugetlbpage.c2
-rw-r--r--arch/s390/numa/mode_emu.c10
-rw-r--r--arch/s390/pci/pci_insn.c6
-rw-r--r--arch/x86/include/asm/pgtable.h10
-rw-r--r--drivers/s390/block/dasd.c1
-rw-r--r--drivers/s390/block/dasd_alias.c5
-rw-r--r--drivers/s390/block/dasd_diag.c2
-rw-r--r--drivers/s390/block/dasd_eckd.c73
-rw-r--r--drivers/s390/char/diag_ftp.c2
-rw-r--r--drivers/s390/char/sclp_rw.c136
-rw-r--r--drivers/s390/char/sclp_rw.h17
-rw-r--r--drivers/s390/char/sclp_tty.c8
-rw-r--r--drivers/s390/cio/cio.c33
-rw-r--r--drivers/s390/cio/cmf.c220
-rw-r--r--drivers/s390/cio/css.c2
-rw-r--r--drivers/s390/cio/device.c4
-rw-r--r--drivers/s390/cio/device.h6
-rw-r--r--drivers/s390/cio/device_fsm.c38
-rw-r--r--drivers/s390/cio/device_ops.c107
-rw-r--r--drivers/s390/cio/device_pgid.c70
-rw-r--r--drivers/s390/crypto/Makefile4
-rw-r--r--drivers/s390/crypto/ap_bus.c1810
-rw-r--r--drivers/s390/crypto/ap_bus.h67
-rw-r--r--drivers/s390/crypto/zcrypt_api.c3
-rw-r--r--drivers/s390/crypto/zcrypt_cca_key.h2
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c28
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c59
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.c420
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.h115
-rw-r--r--drivers/s390/crypto/zcrypt_pcicc.c627
-rw-r--r--drivers/s390/crypto/zcrypt_pcicc.h174
-rw-r--r--drivers/s390/virtio/virtio_ccw.c15
-rw-r--r--drivers/watchdog/diag288_wdt.c4
-rw-r--r--fs/proc/task_mmu.c4
-rw-r--r--include/asm-generic/pgtable.h10
-rw-r--r--include/linux/compiler.h2
101 files changed, 2412 insertions, 3668 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 1d57000b1b24..9b9a2db06810 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -101,6 +101,7 @@ config S390
101 select ARCH_SAVE_PAGE_KEYS if HIBERNATION 101 select ARCH_SAVE_PAGE_KEYS if HIBERNATION
102 select ARCH_SUPPORTS_ATOMIC_RMW 102 select ARCH_SUPPORTS_ATOMIC_RMW
103 select ARCH_SUPPORTS_NUMA_BALANCING 103 select ARCH_SUPPORTS_NUMA_BALANCING
104 select ARCH_USE_BUILTIN_BSWAP
104 select ARCH_USE_CMPXCHG_LOCKREF 105 select ARCH_USE_CMPXCHG_LOCKREF
105 select ARCH_WANTS_PROT_NUMA_PROT_NONE 106 select ARCH_WANTS_PROT_NUMA_PROT_NONE
106 select ARCH_WANT_IPC_PARSE_VERSION 107 select ARCH_WANT_IPC_PARSE_VERSION
@@ -118,6 +119,7 @@ config S390
118 select HAVE_ARCH_EARLY_PFN_TO_NID 119 select HAVE_ARCH_EARLY_PFN_TO_NID
119 select HAVE_ARCH_JUMP_LABEL 120 select HAVE_ARCH_JUMP_LABEL
120 select HAVE_ARCH_SECCOMP_FILTER 121 select HAVE_ARCH_SECCOMP_FILTER
122 select HAVE_ARCH_SOFT_DIRTY
121 select HAVE_ARCH_TRACEHOOK 123 select HAVE_ARCH_TRACEHOOK
122 select HAVE_ARCH_TRANSPARENT_HUGEPAGE 124 select HAVE_ARCH_TRANSPARENT_HUGEPAGE
123 select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES 125 select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 5eeffeefae06..045035796ca7 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -15,6 +15,7 @@
15#include <linux/string.h> 15#include <linux/string.h>
16#include <linux/vmalloc.h> 16#include <linux/vmalloc.h>
17#include <linux/mm.h> 17#include <linux/mm.h>
18#include <asm/diag.h>
18#include <asm/ebcdic.h> 19#include <asm/ebcdic.h>
19#include "hypfs.h" 20#include "hypfs.h"
20 21
@@ -336,7 +337,7 @@ static inline __u64 phys_cpu__ctidx(enum diag204_format type, void *hdr)
336 337
337/* Diagnose 204 functions */ 338/* Diagnose 204 functions */
338 339
339static int diag204(unsigned long subcode, unsigned long size, void *addr) 340static inline int __diag204(unsigned long subcode, unsigned long size, void *addr)
340{ 341{
341 register unsigned long _subcode asm("0") = subcode; 342 register unsigned long _subcode asm("0") = subcode;
342 register unsigned long _size asm("1") = size; 343 register unsigned long _size asm("1") = size;
@@ -351,6 +352,12 @@ static int diag204(unsigned long subcode, unsigned long size, void *addr)
351 return _size; 352 return _size;
352} 353}
353 354
355static int diag204(unsigned long subcode, unsigned long size, void *addr)
356{
357 diag_stat_inc(DIAG_STAT_X204);
358 return __diag204(subcode, size, addr);
359}
360
354/* 361/*
355 * For the old diag subcode 4 with simple data format we have to use real 362 * For the old diag subcode 4 with simple data format we have to use real
356 * memory. If we use subcode 6 or 7 with extended data format, we can (and 363 * memory. If we use subcode 6 or 7 with extended data format, we can (and
@@ -505,6 +512,7 @@ static int diag224(void *ptr)
505{ 512{
506 int rc = -EOPNOTSUPP; 513 int rc = -EOPNOTSUPP;
507 514
515 diag_stat_inc(DIAG_STAT_X224);
508 asm volatile( 516 asm volatile(
509 " diag %1,%2,0x224\n" 517 " diag %1,%2,0x224\n"
510 "0: lhi %0,0x0\n" 518 "0: lhi %0,0x0\n"
diff --git a/arch/s390/hypfs/hypfs_diag0c.c b/arch/s390/hypfs/hypfs_diag0c.c
index 24c747a0fcc3..0f1927cbba31 100644
--- a/arch/s390/hypfs/hypfs_diag0c.c
+++ b/arch/s390/hypfs/hypfs_diag0c.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/slab.h> 9#include <linux/slab.h>
10#include <linux/cpu.h> 10#include <linux/cpu.h>
11#include <asm/diag.h>
11#include <asm/hypfs.h> 12#include <asm/hypfs.h>
12#include "hypfs.h" 13#include "hypfs.h"
13 14
@@ -18,6 +19,7 @@
18 */ 19 */
19static void diag0c(struct hypfs_diag0c_entry *entry) 20static void diag0c(struct hypfs_diag0c_entry *entry)
20{ 21{
22 diag_stat_inc(DIAG_STAT_X00C);
21 asm volatile ( 23 asm volatile (
22 " sam31\n" 24 " sam31\n"
23 " diag %0,%0,0x0c\n" 25 " diag %0,%0,0x0c\n"
diff --git a/arch/s390/hypfs/hypfs_sprp.c b/arch/s390/hypfs/hypfs_sprp.c
index dd42a26d049d..c9e5c72f78bd 100644
--- a/arch/s390/hypfs/hypfs_sprp.c
+++ b/arch/s390/hypfs/hypfs_sprp.c
@@ -13,6 +13,7 @@
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
15#include <asm/compat.h> 15#include <asm/compat.h>
16#include <asm/diag.h>
16#include <asm/sclp.h> 17#include <asm/sclp.h>
17#include "hypfs.h" 18#include "hypfs.h"
18 19
@@ -22,7 +23,7 @@
22 23
23#define DIAG304_CMD_MAX 2 24#define DIAG304_CMD_MAX 2
24 25
25static unsigned long hypfs_sprp_diag304(void *data, unsigned long cmd) 26static inline unsigned long __hypfs_sprp_diag304(void *data, unsigned long cmd)
26{ 27{
27 register unsigned long _data asm("2") = (unsigned long) data; 28 register unsigned long _data asm("2") = (unsigned long) data;
28 register unsigned long _rc asm("3"); 29 register unsigned long _rc asm("3");
@@ -34,6 +35,12 @@ static unsigned long hypfs_sprp_diag304(void *data, unsigned long cmd)
34 return _rc; 35 return _rc;
35} 36}
36 37
38static unsigned long hypfs_sprp_diag304(void *data, unsigned long cmd)
39{
40 diag_stat_inc(DIAG_STAT_X304);
41 return __hypfs_sprp_diag304(data, cmd);
42}
43
37static void hypfs_sprp_free(const void *data) 44static void hypfs_sprp_free(const void *data)
38{ 45{
39 free_page((unsigned long) data); 46 free_page((unsigned long) data);
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index afbe07907c10..44feac38ccfc 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -9,6 +9,7 @@
9#include <linux/errno.h> 9#include <linux/errno.h>
10#include <linux/string.h> 10#include <linux/string.h>
11#include <linux/vmalloc.h> 11#include <linux/vmalloc.h>
12#include <asm/diag.h>
12#include <asm/ebcdic.h> 13#include <asm/ebcdic.h>
13#include <asm/timex.h> 14#include <asm/timex.h>
14#include "hypfs.h" 15#include "hypfs.h"
@@ -66,6 +67,7 @@ static int diag2fc(int size, char* query, void *addr)
66 memset(parm_list.aci_grp, 0x40, NAME_LEN); 67 memset(parm_list.aci_grp, 0x40, NAME_LEN);
67 rc = -1; 68 rc = -1;
68 69
70 diag_stat_inc(DIAG_STAT_X2FC);
69 asm volatile( 71 asm volatile(
70 " diag %0,%1,0x2fc\n" 72 " diag %0,%1,0x2fc\n"
71 "0:\n" 73 "0:\n"
diff --git a/arch/s390/include/asm/appldata.h b/arch/s390/include/asm/appldata.h
index 16887c5fd989..a6263d4e8e56 100644
--- a/arch/s390/include/asm/appldata.h
+++ b/arch/s390/include/asm/appldata.h
@@ -7,6 +7,7 @@
7#ifndef _ASM_S390_APPLDATA_H 7#ifndef _ASM_S390_APPLDATA_H
8#define _ASM_S390_APPLDATA_H 8#define _ASM_S390_APPLDATA_H
9 9
10#include <asm/diag.h>
10#include <asm/io.h> 11#include <asm/io.h>
11 12
12#define APPLDATA_START_INTERVAL_REC 0x80 13#define APPLDATA_START_INTERVAL_REC 0x80
@@ -53,6 +54,7 @@ static inline int appldata_asm(struct appldata_product_id *id,
53 parm_list.buffer_length = length; 54 parm_list.buffer_length = length;
54 parm_list.product_id_addr = (unsigned long) id; 55 parm_list.product_id_addr = (unsigned long) id;
55 parm_list.buffer_addr = virt_to_phys(buffer); 56 parm_list.buffer_addr = virt_to_phys(buffer);
57 diag_stat_inc(DIAG_STAT_X0DC);
56 asm volatile( 58 asm volatile(
57 " diag %1,%0,0xdc" 59 " diag %1,%0,0xdc"
58 : "=d" (ry) 60 : "=d" (ry)
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 117fa5c921c1..911064aa59b2 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -36,7 +36,6 @@
36 \ 36 \
37 typecheck(atomic_t *, ptr); \ 37 typecheck(atomic_t *, ptr); \
38 asm volatile( \ 38 asm volatile( \
39 __barrier \
40 op_string " %0,%2,%1\n" \ 39 op_string " %0,%2,%1\n" \
41 __barrier \ 40 __barrier \
42 : "=d" (old_val), "+Q" ((ptr)->counter) \ 41 : "=d" (old_val), "+Q" ((ptr)->counter) \
@@ -180,7 +179,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
180 \ 179 \
181 typecheck(atomic64_t *, ptr); \ 180 typecheck(atomic64_t *, ptr); \
182 asm volatile( \ 181 asm volatile( \
183 __barrier \
184 op_string " %0,%2,%1\n" \ 182 op_string " %0,%2,%1\n" \
185 __barrier \ 183 __barrier \
186 : "=d" (old_val), "+Q" ((ptr)->counter) \ 184 : "=d" (old_val), "+Q" ((ptr)->counter) \
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index d48fe0162331..d68e11e0df5e 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -22,10 +22,10 @@
22 22
23#define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0) 23#define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0)
24 24
25#define rmb() mb() 25#define rmb() barrier()
26#define wmb() mb() 26#define wmb() barrier()
27#define dma_rmb() rmb() 27#define dma_rmb() mb()
28#define dma_wmb() wmb() 28#define dma_wmb() mb()
29#define smp_mb() mb() 29#define smp_mb() mb()
30#define smp_rmb() rmb() 30#define smp_rmb() rmb()
31#define smp_wmb() wmb() 31#define smp_wmb() wmb()
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 9b68e98a724f..8043f10da6b5 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -11,30 +11,25 @@
11 * big-endian system because, unlike little endian, the number of each 11 * big-endian system because, unlike little endian, the number of each
12 * bit depends on the word size. 12 * bit depends on the word size.
13 * 13 *
14 * The bitop functions are defined to work on unsigned longs, so for an 14 * The bitop functions are defined to work on unsigned longs, so the bits
15 * s390x system the bits end up numbered: 15 * end up numbered:
16 * |63..............0|127............64|191...........128|255...........192| 16 * |63..............0|127............64|191...........128|255...........192|
17 * and on s390:
18 * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
19 * 17 *
20 * There are a few little-endian macros used mostly for filesystem 18 * There are a few little-endian macros used mostly for filesystem
21 * bitmaps, these work on similar bit arrays layouts, but 19 * bitmaps, these work on similar bit array layouts, but byte-oriented:
22 * byte-oriented:
23 * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56| 20 * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
24 * 21 *
25 * The main difference is that bit 3-5 (64b) or 3-4 (32b) in the bit 22 * The main difference is that bit 3-5 in the bit number field needs to be
26 * number field needs to be reversed compared to the big-endian bit 23 * reversed compared to the big-endian bit fields. This can be achieved by
27 * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b). 24 * XOR with 0x38.
28 * 25 *
29 * We also have special functions which work with an MSB0 encoding: 26 * We also have special functions which work with an MSB0 encoding.
30 * on an s390x system the bits are numbered: 27 * The bits are numbered:
31 * |0..............63|64............127|128...........191|192...........255| 28 * |0..............63|64............127|128...........191|192...........255|
32 * and on s390:
33 * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
34 * 29 *
35 * The main difference is that bit 0-63 (64b) or 0-31 (32b) in the bit 30 * The main difference is that bit 0-63 in the bit number field needs to be
36 * number field needs to be reversed compared to the LSB0 encoded bit 31 * reversed compared to the LSB0 encoded bit fields. This can be achieved by
37 * fields. This can be achieved by XOR with 0x3f (64b) or 0x1f (32b). 32 * XOR with 0x3f.
38 * 33 *
39 */ 34 */
40 35
@@ -64,7 +59,6 @@
64 \ 59 \
65 typecheck(unsigned long *, (__addr)); \ 60 typecheck(unsigned long *, (__addr)); \
66 asm volatile( \ 61 asm volatile( \
67 __barrier \
68 __op_string " %0,%2,%1\n" \ 62 __op_string " %0,%2,%1\n" \
69 __barrier \ 63 __barrier \
70 : "=d" (__old), "+Q" (*(__addr)) \ 64 : "=d" (__old), "+Q" (*(__addr)) \
@@ -276,12 +270,32 @@ static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
276 return (*addr >> (nr & 7)) & 1; 270 return (*addr >> (nr & 7)) & 1;
277} 271}
278 272
273static inline int test_and_set_bit_lock(unsigned long nr,
274 volatile unsigned long *ptr)
275{
276 if (test_bit(nr, ptr))
277 return 1;
278 return test_and_set_bit(nr, ptr);
279}
280
281static inline void clear_bit_unlock(unsigned long nr,
282 volatile unsigned long *ptr)
283{
284 smp_mb__before_atomic();
285 clear_bit(nr, ptr);
286}
287
288static inline void __clear_bit_unlock(unsigned long nr,
289 volatile unsigned long *ptr)
290{
291 smp_mb();
292 __clear_bit(nr, ptr);
293}
294
279/* 295/*
280 * Functions which use MSB0 bit numbering. 296 * Functions which use MSB0 bit numbering.
281 * On an s390x system the bits are numbered: 297 * The bits are numbered:
282 * |0..............63|64............127|128...........191|192...........255| 298 * |0..............63|64............127|128...........191|192...........255|
283 * and on s390:
284 * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
285 */ 299 */
286unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size); 300unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
287unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size, 301unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
@@ -446,7 +460,6 @@ static inline int fls(int word)
446#include <asm-generic/bitops/ffz.h> 460#include <asm-generic/bitops/ffz.h>
447#include <asm-generic/bitops/find.h> 461#include <asm-generic/bitops/find.h>
448#include <asm-generic/bitops/hweight.h> 462#include <asm-generic/bitops/hweight.h>
449#include <asm-generic/bitops/lock.h>
450#include <asm-generic/bitops/sched.h> 463#include <asm-generic/bitops/sched.h>
451#include <asm-generic/bitops/le.h> 464#include <asm-generic/bitops/le.h>
452#include <asm-generic/bitops/ext2-atomic-setbit.h> 465#include <asm-generic/bitops/ext2-atomic-setbit.h>
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 096339207764..0c5d8ee657f0 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -5,6 +5,7 @@
5#define _ASM_S390_CIO_H_ 5#define _ASM_S390_CIO_H_
6 6
7#include <linux/spinlock.h> 7#include <linux/spinlock.h>
8#include <linux/bitops.h>
8#include <asm/types.h> 9#include <asm/types.h>
9 10
10#define LPM_ANYPATH 0xff 11#define LPM_ANYPATH 0xff
@@ -296,6 +297,15 @@ static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1,
296 return 0; 297 return 0;
297} 298}
298 299
300/**
301 * pathmask_to_pos() - find the position of the left-most bit in a pathmask
302 * @mask: pathmask with at least one bit set
303 */
304static inline u8 pathmask_to_pos(u8 mask)
305{
306 return 8 - ffs(mask);
307}
308
299void channel_subsystem_reinit(void); 309void channel_subsystem_reinit(void);
300extern void css_schedule_reprobe(void); 310extern void css_schedule_reprobe(void);
301 311
diff --git a/arch/s390/include/asm/cmb.h b/arch/s390/include/asm/cmb.h
index 806eac12e3bd..ed2630c23f90 100644
--- a/arch/s390/include/asm/cmb.h
+++ b/arch/s390/include/asm/cmb.h
@@ -6,6 +6,7 @@
6struct ccw_device; 6struct ccw_device;
7extern int enable_cmf(struct ccw_device *cdev); 7extern int enable_cmf(struct ccw_device *cdev);
8extern int disable_cmf(struct ccw_device *cdev); 8extern int disable_cmf(struct ccw_device *cdev);
9extern int __disable_cmf(struct ccw_device *cdev);
9extern u64 cmf_read(struct ccw_device *cdev, int index); 10extern u64 cmf_read(struct ccw_device *cdev, int index);
10extern int cmf_readall(struct ccw_device *cdev, struct cmbdata *data); 11extern int cmf_readall(struct ccw_device *cdev, struct cmbdata *data);
11 12
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 411464f4c97a..24ea6948e32b 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -32,7 +32,7 @@
32 __old; \ 32 __old; \
33}) 33})
34 34
35#define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn) \ 35#define __cmpxchg_double(p1, p2, o1, o2, n1, n2) \
36({ \ 36({ \
37 register __typeof__(*(p1)) __old1 asm("2") = (o1); \ 37 register __typeof__(*(p1)) __old1 asm("2") = (o1); \
38 register __typeof__(*(p2)) __old2 asm("3") = (o2); \ 38 register __typeof__(*(p2)) __old2 asm("3") = (o2); \
@@ -40,7 +40,7 @@
40 register __typeof__(*(p2)) __new2 asm("5") = (n2); \ 40 register __typeof__(*(p2)) __new2 asm("5") = (n2); \
41 int cc; \ 41 int cc; \
42 asm volatile( \ 42 asm volatile( \
43 insn " %[old],%[new],%[ptr]\n" \ 43 " cdsg %[old],%[new],%[ptr]\n" \
44 " ipm %[cc]\n" \ 44 " ipm %[cc]\n" \
45 " srl %[cc],28" \ 45 " srl %[cc],28" \
46 : [cc] "=d" (cc), [old] "+d" (__old1), "+d" (__old2) \ 46 : [cc] "=d" (cc), [old] "+d" (__old1), "+d" (__old2) \
@@ -50,30 +50,6 @@
50 !cc; \ 50 !cc; \
51}) 51})
52 52
53#define __cmpxchg_double_4(p1, p2, o1, o2, n1, n2) \
54 __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, "cds")
55
56#define __cmpxchg_double_8(p1, p2, o1, o2, n1, n2) \
57 __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, "cdsg")
58
59extern void __cmpxchg_double_called_with_bad_pointer(void);
60
61#define __cmpxchg_double(p1, p2, o1, o2, n1, n2) \
62({ \
63 int __ret; \
64 switch (sizeof(*(p1))) { \
65 case 4: \
66 __ret = __cmpxchg_double_4(p1, p2, o1, o2, n1, n2); \
67 break; \
68 case 8: \
69 __ret = __cmpxchg_double_8(p1, p2, o1, o2, n1, n2); \
70 break; \
71 default: \
72 __cmpxchg_double_called_with_bad_pointer(); \
73 } \
74 __ret; \
75})
76
77#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \ 53#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
78({ \ 54({ \
79 __typeof__(p1) __p1 = (p1); \ 55 __typeof__(p1) __p1 = (p1); \
@@ -81,7 +57,7 @@ extern void __cmpxchg_double_called_with_bad_pointer(void);
81 BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \ 57 BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \
82 BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \ 58 BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \
83 VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\ 59 VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\
84 __cmpxchg_double_8(__p1, __p2, o1, o2, n1, n2); \ 60 __cmpxchg_double(__p1, __p2, o1, o2, n1, n2); \
85}) 61})
86 62
87#define system_has_cmpxchg_double() 1 63#define system_has_cmpxchg_double() 1
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 5243a8679a1d..9dd04b9e9782 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -22,15 +22,10 @@
22#define CPU_MF_INT_SF_LSDA (1 << 22) /* loss of sample data alert */ 22#define CPU_MF_INT_SF_LSDA (1 << 22) /* loss of sample data alert */
23#define CPU_MF_INT_CF_CACA (1 << 7) /* counter auth. change alert */ 23#define CPU_MF_INT_CF_CACA (1 << 7) /* counter auth. change alert */
24#define CPU_MF_INT_CF_LCDA (1 << 6) /* loss of counter data alert */ 24#define CPU_MF_INT_CF_LCDA (1 << 6) /* loss of counter data alert */
25#define CPU_MF_INT_RI_HALTED (1 << 5) /* run-time instr. halted */
26#define CPU_MF_INT_RI_BUF_FULL (1 << 4) /* run-time instr. program
27 buffer full */
28
29#define CPU_MF_INT_CF_MASK (CPU_MF_INT_CF_CACA|CPU_MF_INT_CF_LCDA) 25#define CPU_MF_INT_CF_MASK (CPU_MF_INT_CF_CACA|CPU_MF_INT_CF_LCDA)
30#define CPU_MF_INT_SF_MASK (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE| \ 26#define CPU_MF_INT_SF_MASK (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE| \
31 CPU_MF_INT_SF_PRA|CPU_MF_INT_SF_SACA| \ 27 CPU_MF_INT_SF_PRA|CPU_MF_INT_SF_SACA| \
32 CPU_MF_INT_SF_LSDA) 28 CPU_MF_INT_SF_LSDA)
33#define CPU_MF_INT_RI_MASK (CPU_MF_INT_RI_HALTED|CPU_MF_INT_RI_BUF_FULL)
34 29
35/* CPU measurement facility support */ 30/* CPU measurement facility support */
36static inline int cpum_cf_avail(void) 31static inline int cpum_cf_avail(void)
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index 17a373576868..d7697ab802f6 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -46,8 +46,6 @@ static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
46 __ctl_load(reg, cr, cr); 46 __ctl_load(reg, cr, cr);
47} 47}
48 48
49void __ctl_set_vx(void);
50
51void smp_ctl_set_bit(int cr, int bit); 49void smp_ctl_set_bit(int cr, int bit);
52void smp_ctl_clear_bit(int cr, int bit); 50void smp_ctl_clear_bit(int cr, int bit);
53 51
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index 7e91c58072e2..5fac921c1c42 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -8,6 +8,34 @@
8#ifndef _ASM_S390_DIAG_H 8#ifndef _ASM_S390_DIAG_H
9#define _ASM_S390_DIAG_H 9#define _ASM_S390_DIAG_H
10 10
11#include <linux/percpu.h>
12
13enum diag_stat_enum {
14 DIAG_STAT_X008,
15 DIAG_STAT_X00C,
16 DIAG_STAT_X010,
17 DIAG_STAT_X014,
18 DIAG_STAT_X044,
19 DIAG_STAT_X064,
20 DIAG_STAT_X09C,
21 DIAG_STAT_X0DC,
22 DIAG_STAT_X204,
23 DIAG_STAT_X210,
24 DIAG_STAT_X224,
25 DIAG_STAT_X250,
26 DIAG_STAT_X258,
27 DIAG_STAT_X288,
28 DIAG_STAT_X2C4,
29 DIAG_STAT_X2FC,
30 DIAG_STAT_X304,
31 DIAG_STAT_X308,
32 DIAG_STAT_X500,
33 NR_DIAG_STAT
34};
35
36void diag_stat_inc(enum diag_stat_enum nr);
37void diag_stat_inc_norecursion(enum diag_stat_enum nr);
38
11/* 39/*
12 * Diagnose 10: Release page range 40 * Diagnose 10: Release page range
13 */ 41 */
@@ -18,6 +46,7 @@ static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn)
18 start_addr = start_pfn << PAGE_SHIFT; 46 start_addr = start_pfn << PAGE_SHIFT;
19 end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT; 47 end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT;
20 48
49 diag_stat_inc(DIAG_STAT_X010);
21 asm volatile( 50 asm volatile(
22 "0: diag %0,%1,0x10\n" 51 "0: diag %0,%1,0x10\n"
23 "1:\n" 52 "1:\n"
diff --git a/arch/s390/include/asm/etr.h b/arch/s390/include/asm/etr.h
index f7e5c36688c3..105f90e63a0e 100644
--- a/arch/s390/include/asm/etr.h
+++ b/arch/s390/include/asm/etr.h
@@ -211,8 +211,9 @@ static inline int etr_ptff(void *ptff_block, unsigned int func)
211#define ETR_PTFF_SGS 0x43 /* set gross steering rate */ 211#define ETR_PTFF_SGS 0x43 /* set gross steering rate */
212 212
213/* Functions needed by the machine check handler */ 213/* Functions needed by the machine check handler */
214void etr_switch_to_local(void); 214int etr_switch_to_local(void);
215void etr_sync_check(void); 215int etr_sync_check(void);
216void etr_queue_work(void);
216 217
217/* notifier for syncs */ 218/* notifier for syncs */
218extern struct atomic_notifier_head s390_epoch_delta_notifier; 219extern struct atomic_notifier_head s390_epoch_delta_notifier;
@@ -253,7 +254,8 @@ struct stp_sstpi {
253} __attribute__ ((packed)); 254} __attribute__ ((packed));
254 255
255/* Functions needed by the machine check handler */ 256/* Functions needed by the machine check handler */
256void stp_sync_check(void); 257int stp_sync_check(void);
257void stp_island_check(void); 258int stp_island_check(void);
259void stp_queue_work(void);
258 260
259#endif /* __S390_ETR_H */ 261#endif /* __S390_ETR_H */
diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h
new file mode 100644
index 000000000000..5e04f3cbd320
--- /dev/null
+++ b/arch/s390/include/asm/fpu/api.h
@@ -0,0 +1,30 @@
1/*
2 * In-kernel FPU support functions
3 *
4 * Copyright IBM Corp. 2015
5 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
6 */
7
8#ifndef _ASM_S390_FPU_API_H
9#define _ASM_S390_FPU_API_H
10
11void save_fpu_regs(void);
12
13static inline int test_fp_ctl(u32 fpc)
14{
15 u32 orig_fpc;
16 int rc;
17
18 asm volatile(
19 " efpc %1\n"
20 " sfpc %2\n"
21 "0: sfpc %1\n"
22 " la %0,0\n"
23 "1:\n"
24 EX_TABLE(0b,1b)
25 : "=d" (rc), "=d" (orig_fpc)
26 : "d" (fpc), "0" (-EINVAL));
27 return rc;
28}
29
30#endif /* _ASM_S390_FPU_API_H */
diff --git a/arch/s390/include/asm/fpu-internal.h b/arch/s390/include/asm/fpu/internal.h
index 55dc2c0fb40a..2559b16da525 100644
--- a/arch/s390/include/asm/fpu-internal.h
+++ b/arch/s390/include/asm/fpu/internal.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * General floating pointer and vector register helpers 2 * FPU state and register content conversion primitives
3 * 3 *
4 * Copyright IBM Corp. 2015 4 * Copyright IBM Corp. 2015
5 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> 5 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
@@ -8,50 +8,9 @@
8#ifndef _ASM_S390_FPU_INTERNAL_H 8#ifndef _ASM_S390_FPU_INTERNAL_H
9#define _ASM_S390_FPU_INTERNAL_H 9#define _ASM_S390_FPU_INTERNAL_H
10 10
11#define FPU_USE_VX 1 /* Vector extension is active */
12
13#ifndef __ASSEMBLY__
14
15#include <linux/errno.h>
16#include <linux/string.h> 11#include <linux/string.h>
17#include <asm/linkage.h>
18#include <asm/ctl_reg.h> 12#include <asm/ctl_reg.h>
19#include <asm/sigcontext.h> 13#include <asm/fpu/types.h>
20
21struct fpu {
22 __u32 fpc; /* Floating-point control */
23 __u32 flags;
24 union {
25 void *regs;
26 freg_t *fprs; /* Floating-point register save area */
27 __vector128 *vxrs; /* Vector register save area */
28 };
29};
30
31void save_fpu_regs(void);
32
33#define is_vx_fpu(fpu) (!!((fpu)->flags & FPU_USE_VX))
34#define is_vx_task(tsk) (!!((tsk)->thread.fpu.flags & FPU_USE_VX))
35
36/* VX array structure for address operand constraints in inline assemblies */
37struct vx_array { __vector128 _[__NUM_VXRS]; };
38
39static inline int test_fp_ctl(u32 fpc)
40{
41 u32 orig_fpc;
42 int rc;
43
44 asm volatile(
45 " efpc %1\n"
46 " sfpc %2\n"
47 "0: sfpc %1\n"
48 " la %0,0\n"
49 "1:\n"
50 EX_TABLE(0b,1b)
51 : "=d" (rc), "=d" (orig_fpc)
52 : "d" (fpc), "0" (-EINVAL));
53 return rc;
54}
55 14
56static inline void save_vx_regs_safe(__vector128 *vxrs) 15static inline void save_vx_regs_safe(__vector128 *vxrs)
57{ 16{
@@ -89,7 +48,7 @@ static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
89static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu) 48static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
90{ 49{
91 fpregs->pad = 0; 50 fpregs->pad = 0;
92 if (is_vx_fpu(fpu)) 51 if (MACHINE_HAS_VX)
93 convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs); 52 convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
94 else 53 else
95 memcpy((freg_t *)&fpregs->fprs, fpu->fprs, 54 memcpy((freg_t *)&fpregs->fprs, fpu->fprs,
@@ -98,13 +57,11 @@ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
98 57
99static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu) 58static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
100{ 59{
101 if (is_vx_fpu(fpu)) 60 if (MACHINE_HAS_VX)
102 convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs); 61 convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
103 else 62 else
104 memcpy(fpu->fprs, (freg_t *)&fpregs->fprs, 63 memcpy(fpu->fprs, (freg_t *)&fpregs->fprs,
105 sizeof(fpregs->fprs)); 64 sizeof(fpregs->fprs));
106} 65}
107 66
108#endif
109
110#endif /* _ASM_S390_FPU_INTERNAL_H */ 67#endif /* _ASM_S390_FPU_INTERNAL_H */
diff --git a/arch/s390/include/asm/fpu/types.h b/arch/s390/include/asm/fpu/types.h
new file mode 100644
index 000000000000..14a8b0c14f87
--- /dev/null
+++ b/arch/s390/include/asm/fpu/types.h
@@ -0,0 +1,25 @@
1/*
2 * FPU data structures
3 *
4 * Copyright IBM Corp. 2015
5 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
6 */
7
8#ifndef _ASM_S390_FPU_TYPES_H
9#define _ASM_S390_FPU_TYPES_H
10
11#include <asm/sigcontext.h>
12
13struct fpu {
14 __u32 fpc; /* Floating-point control */
15 union {
16 void *regs;
17 freg_t *fprs; /* Floating-point register save area */
18 __vector128 *vxrs; /* Vector register save area */
19 };
20};
21
22/* VX array structure for address operand constraints in inline assemblies */
23struct vx_array { __vector128 _[__NUM_VXRS]; };
24
25#endif /* _ASM_S390_FPU_TYPES_H */
diff --git a/arch/s390/include/asm/idle.h b/arch/s390/include/asm/idle.h
index 113cd963dbbe..51ff96d9f287 100644
--- a/arch/s390/include/asm/idle.h
+++ b/arch/s390/include/asm/idle.h
@@ -24,4 +24,6 @@ struct s390_idle_data {
24extern struct device_attribute dev_attr_idle_count; 24extern struct device_attribute dev_attr_idle_count;
25extern struct device_attribute dev_attr_idle_time_us; 25extern struct device_attribute dev_attr_idle_time_us;
26 26
27void psw_idle(struct s390_idle_data *, unsigned long);
28
27#endif /* _S390_IDLE_H */ 29#endif /* _S390_IDLE_H */
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index ff95d15a2384..f97b055de76a 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -47,7 +47,6 @@ enum interruption_class {
47 IRQEXT_IUC, 47 IRQEXT_IUC,
48 IRQEXT_CMS, 48 IRQEXT_CMS,
49 IRQEXT_CMC, 49 IRQEXT_CMC,
50 IRQEXT_CMR,
51 IRQEXT_FTP, 50 IRQEXT_FTP,
52 IRQIO_CIO, 51 IRQIO_CIO,
53 IRQIO_QAI, 52 IRQIO_QAI,
@@ -96,6 +95,19 @@ enum irq_subclass {
96 IRQ_SUBCLASS_SERVICE_SIGNAL = 9, 95 IRQ_SUBCLASS_SERVICE_SIGNAL = 9,
97}; 96};
98 97
98#define CR0_IRQ_SUBCLASS_MASK \
99 ((1UL << (63 - 30)) /* Warning Track */ | \
100 (1UL << (63 - 48)) /* Malfunction Alert */ | \
101 (1UL << (63 - 49)) /* Emergency Signal */ | \
102 (1UL << (63 - 50)) /* External Call */ | \
103 (1UL << (63 - 52)) /* Clock Comparator */ | \
104 (1UL << (63 - 53)) /* CPU Timer */ | \
105 (1UL << (63 - 54)) /* Service Signal */ | \
106 (1UL << (63 - 57)) /* Interrupt Key */ | \
107 (1UL << (63 - 58)) /* Measurement Alert */ | \
108 (1UL << (63 - 59)) /* Timing Alert */ | \
109 (1UL << (63 - 62))) /* IUCV */
110
99void irq_subclass_register(enum irq_subclass subclass); 111void irq_subclass_register(enum irq_subclass subclass);
100void irq_subclass_unregister(enum irq_subclass subclass); 112void irq_subclass_unregister(enum irq_subclass subclass);
101 113
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 8ced426091e1..7f654308817c 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -22,7 +22,7 @@
22#include <linux/kvm.h> 22#include <linux/kvm.h>
23#include <asm/debug.h> 23#include <asm/debug.h>
24#include <asm/cpu.h> 24#include <asm/cpu.h>
25#include <asm/fpu-internal.h> 25#include <asm/fpu/api.h>
26#include <asm/isc.h> 26#include <asm/isc.h>
27 27
28#define KVM_MAX_VCPUS 64 28#define KVM_MAX_VCPUS 64
diff --git a/arch/s390/include/asm/kvm_para.h b/arch/s390/include/asm/kvm_para.h
index e0f842308a68..41393052ac57 100644
--- a/arch/s390/include/asm/kvm_para.h
+++ b/arch/s390/include/asm/kvm_para.h
@@ -27,10 +27,9 @@
27#define __S390_KVM_PARA_H 27#define __S390_KVM_PARA_H
28 28
29#include <uapi/asm/kvm_para.h> 29#include <uapi/asm/kvm_para.h>
30#include <asm/diag.h>
30 31
31 32static inline long __kvm_hypercall0(unsigned long nr)
32
33static inline long kvm_hypercall0(unsigned long nr)
34{ 33{
35 register unsigned long __nr asm("1") = nr; 34 register unsigned long __nr asm("1") = nr;
36 register long __rc asm("2"); 35 register long __rc asm("2");
@@ -40,7 +39,13 @@ static inline long kvm_hypercall0(unsigned long nr)
40 return __rc; 39 return __rc;
41} 40}
42 41
43static inline long kvm_hypercall1(unsigned long nr, unsigned long p1) 42static inline long kvm_hypercall0(unsigned long nr)
43{
44 diag_stat_inc(DIAG_STAT_X500);
45 return __kvm_hypercall0(nr);
46}
47
48static inline long __kvm_hypercall1(unsigned long nr, unsigned long p1)
44{ 49{
45 register unsigned long __nr asm("1") = nr; 50 register unsigned long __nr asm("1") = nr;
46 register unsigned long __p1 asm("2") = p1; 51 register unsigned long __p1 asm("2") = p1;
@@ -51,7 +56,13 @@ static inline long kvm_hypercall1(unsigned long nr, unsigned long p1)
51 return __rc; 56 return __rc;
52} 57}
53 58
54static inline long kvm_hypercall2(unsigned long nr, unsigned long p1, 59static inline long kvm_hypercall1(unsigned long nr, unsigned long p1)
60{
61 diag_stat_inc(DIAG_STAT_X500);
62 return __kvm_hypercall1(nr, p1);
63}
64
65static inline long __kvm_hypercall2(unsigned long nr, unsigned long p1,
55 unsigned long p2) 66 unsigned long p2)
56{ 67{
57 register unsigned long __nr asm("1") = nr; 68 register unsigned long __nr asm("1") = nr;
@@ -65,7 +76,14 @@ static inline long kvm_hypercall2(unsigned long nr, unsigned long p1,
65 return __rc; 76 return __rc;
66} 77}
67 78
68static inline long kvm_hypercall3(unsigned long nr, unsigned long p1, 79static inline long kvm_hypercall2(unsigned long nr, unsigned long p1,
80 unsigned long p2)
81{
82 diag_stat_inc(DIAG_STAT_X500);
83 return __kvm_hypercall2(nr, p1, p2);
84}
85
86static inline long __kvm_hypercall3(unsigned long nr, unsigned long p1,
69 unsigned long p2, unsigned long p3) 87 unsigned long p2, unsigned long p3)
70{ 88{
71 register unsigned long __nr asm("1") = nr; 89 register unsigned long __nr asm("1") = nr;
@@ -80,8 +98,14 @@ static inline long kvm_hypercall3(unsigned long nr, unsigned long p1,
80 return __rc; 98 return __rc;
81} 99}
82 100
101static inline long kvm_hypercall3(unsigned long nr, unsigned long p1,
102 unsigned long p2, unsigned long p3)
103{
104 diag_stat_inc(DIAG_STAT_X500);
105 return __kvm_hypercall3(nr, p1, p2, p3);
106}
83 107
84static inline long kvm_hypercall4(unsigned long nr, unsigned long p1, 108static inline long __kvm_hypercall4(unsigned long nr, unsigned long p1,
85 unsigned long p2, unsigned long p3, 109 unsigned long p2, unsigned long p3,
86 unsigned long p4) 110 unsigned long p4)
87{ 111{
@@ -98,7 +122,15 @@ static inline long kvm_hypercall4(unsigned long nr, unsigned long p1,
98 return __rc; 122 return __rc;
99} 123}
100 124
101static inline long kvm_hypercall5(unsigned long nr, unsigned long p1, 125static inline long kvm_hypercall4(unsigned long nr, unsigned long p1,
126 unsigned long p2, unsigned long p3,
127 unsigned long p4)
128{
129 diag_stat_inc(DIAG_STAT_X500);
130 return __kvm_hypercall4(nr, p1, p2, p3, p4);
131}
132
133static inline long __kvm_hypercall5(unsigned long nr, unsigned long p1,
102 unsigned long p2, unsigned long p3, 134 unsigned long p2, unsigned long p3,
103 unsigned long p4, unsigned long p5) 135 unsigned long p4, unsigned long p5)
104{ 136{
@@ -116,7 +148,15 @@ static inline long kvm_hypercall5(unsigned long nr, unsigned long p1,
116 return __rc; 148 return __rc;
117} 149}
118 150
119static inline long kvm_hypercall6(unsigned long nr, unsigned long p1, 151static inline long kvm_hypercall5(unsigned long nr, unsigned long p1,
152 unsigned long p2, unsigned long p3,
153 unsigned long p4, unsigned long p5)
154{
155 diag_stat_inc(DIAG_STAT_X500);
156 return __kvm_hypercall5(nr, p1, p2, p3, p4, p5);
157}
158
159static inline long __kvm_hypercall6(unsigned long nr, unsigned long p1,
120 unsigned long p2, unsigned long p3, 160 unsigned long p2, unsigned long p3,
121 unsigned long p4, unsigned long p5, 161 unsigned long p4, unsigned long p5,
122 unsigned long p6) 162 unsigned long p6)
@@ -137,6 +177,15 @@ static inline long kvm_hypercall6(unsigned long nr, unsigned long p1,
137 return __rc; 177 return __rc;
138} 178}
139 179
180static inline long kvm_hypercall6(unsigned long nr, unsigned long p1,
181 unsigned long p2, unsigned long p3,
182 unsigned long p4, unsigned long p5,
183 unsigned long p6)
184{
185 diag_stat_inc(DIAG_STAT_X500);
186 return __kvm_hypercall6(nr, p1, p2, p3, p4, p5, p6);
187}
188
140/* kvm on s390 is always paravirtualization enabled */ 189/* kvm on s390 is always paravirtualization enabled */
141static inline int kvm_para_available(void) 190static inline int kvm_para_available(void)
142{ 191{
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 663f23e37460..afe1cfebf1a4 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -67,7 +67,7 @@ struct _lowcore {
67 __u8 pad_0x00c4[0x00c8-0x00c4]; /* 0x00c4 */ 67 __u8 pad_0x00c4[0x00c8-0x00c4]; /* 0x00c4 */
68 __u32 stfl_fac_list; /* 0x00c8 */ 68 __u32 stfl_fac_list; /* 0x00c8 */
69 __u8 pad_0x00cc[0x00e8-0x00cc]; /* 0x00cc */ 69 __u8 pad_0x00cc[0x00e8-0x00cc]; /* 0x00cc */
70 __u32 mcck_interruption_code[2]; /* 0x00e8 */ 70 __u64 mcck_interruption_code; /* 0x00e8 */
71 __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */ 71 __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */
72 __u32 external_damage_code; /* 0x00f4 */ 72 __u32 external_damage_code; /* 0x00f4 */
73 __u64 failing_storage_address; /* 0x00f8 */ 73 __u64 failing_storage_address; /* 0x00f8 */
@@ -132,7 +132,14 @@ struct _lowcore {
132 /* Address space pointer. */ 132 /* Address space pointer. */
133 __u64 kernel_asce; /* 0x0358 */ 133 __u64 kernel_asce; /* 0x0358 */
134 __u64 user_asce; /* 0x0360 */ 134 __u64 user_asce; /* 0x0360 */
135 __u64 current_pid; /* 0x0368 */ 135
136 /*
137 * The lpp and current_pid fields form a
138 * 64-bit value that is set as program
139 * parameter with the LPP instruction.
140 */
141 __u32 lpp; /* 0x0368 */
142 __u32 current_pid; /* 0x036c */
136 143
137 /* SMP info area */ 144 /* SMP info area */
138 __u32 cpu_nr; /* 0x0370 */ 145 __u32 cpu_nr; /* 0x0370 */
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index 3027a5a72b74..b75fd910386a 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -11,51 +11,62 @@
11#ifndef _ASM_S390_NMI_H 11#ifndef _ASM_S390_NMI_H
12#define _ASM_S390_NMI_H 12#define _ASM_S390_NMI_H
13 13
14#include <linux/const.h>
14#include <linux/types.h> 15#include <linux/types.h>
15 16
16struct mci { 17#define MCCK_CODE_SYSTEM_DAMAGE _BITUL(63)
17 __u32 sd : 1; /* 00 system damage */ 18#define MCCK_CODE_CPU_TIMER_VALID _BITUL(63 - 46)
18 __u32 pd : 1; /* 01 instruction-processing damage */ 19#define MCCK_CODE_PSW_MWP_VALID _BITUL(63 - 20)
19 __u32 sr : 1; /* 02 system recovery */ 20#define MCCK_CODE_PSW_IA_VALID _BITUL(63 - 23)
20 __u32 : 1; /* 03 */ 21
21 __u32 cd : 1; /* 04 timing-facility damage */ 22#ifndef __ASSEMBLY__
22 __u32 ed : 1; /* 05 external damage */ 23
23 __u32 : 1; /* 06 */ 24union mci {
24 __u32 dg : 1; /* 07 degradation */ 25 unsigned long val;
25 __u32 w : 1; /* 08 warning pending */ 26 struct {
26 __u32 cp : 1; /* 09 channel-report pending */ 27 u64 sd : 1; /* 00 system damage */
27 __u32 sp : 1; /* 10 service-processor damage */ 28 u64 pd : 1; /* 01 instruction-processing damage */
28 __u32 ck : 1; /* 11 channel-subsystem damage */ 29 u64 sr : 1; /* 02 system recovery */
29 __u32 : 2; /* 12-13 */ 30 u64 : 1; /* 03 */
30 __u32 b : 1; /* 14 backed up */ 31 u64 cd : 1; /* 04 timing-facility damage */
31 __u32 : 1; /* 15 */ 32 u64 ed : 1; /* 05 external damage */
32 __u32 se : 1; /* 16 storage error uncorrected */ 33 u64 : 1; /* 06 */
33 __u32 sc : 1; /* 17 storage error corrected */ 34 u64 dg : 1; /* 07 degradation */
34 __u32 ke : 1; /* 18 storage-key error uncorrected */ 35 u64 w : 1; /* 08 warning pending */
35 __u32 ds : 1; /* 19 storage degradation */ 36 u64 cp : 1; /* 09 channel-report pending */
36 __u32 wp : 1; /* 20 psw mwp validity */ 37 u64 sp : 1; /* 10 service-processor damage */
37 __u32 ms : 1; /* 21 psw mask and key validity */ 38 u64 ck : 1; /* 11 channel-subsystem damage */
38 __u32 pm : 1; /* 22 psw program mask and cc validity */ 39 u64 : 2; /* 12-13 */
39 __u32 ia : 1; /* 23 psw instruction address validity */ 40 u64 b : 1; /* 14 backed up */
40 __u32 fa : 1; /* 24 failing storage address validity */ 41 u64 : 1; /* 15 */
41 __u32 vr : 1; /* 25 vector register validity */ 42 u64 se : 1; /* 16 storage error uncorrected */
42 __u32 ec : 1; /* 26 external damage code validity */ 43 u64 sc : 1; /* 17 storage error corrected */
43 __u32 fp : 1; /* 27 floating point register validity */ 44 u64 ke : 1; /* 18 storage-key error uncorrected */
44 __u32 gr : 1; /* 28 general register validity */ 45 u64 ds : 1; /* 19 storage degradation */
45 __u32 cr : 1; /* 29 control register validity */ 46 u64 wp : 1; /* 20 psw mwp validity */
46 __u32 : 1; /* 30 */ 47 u64 ms : 1; /* 21 psw mask and key validity */
47 __u32 st : 1; /* 31 storage logical validity */ 48 u64 pm : 1; /* 22 psw program mask and cc validity */
48 __u32 ie : 1; /* 32 indirect storage error */ 49 u64 ia : 1; /* 23 psw instruction address validity */
49 __u32 ar : 1; /* 33 access register validity */ 50 u64 fa : 1; /* 24 failing storage address validity */
50 __u32 da : 1; /* 34 delayed access exception */ 51 u64 vr : 1; /* 25 vector register validity */
51 __u32 : 7; /* 35-41 */ 52 u64 ec : 1; /* 26 external damage code validity */
52 __u32 pr : 1; /* 42 tod programmable register validity */ 53 u64 fp : 1; /* 27 floating point register validity */
53 __u32 fc : 1; /* 43 fp control register validity */ 54 u64 gr : 1; /* 28 general register validity */
54 __u32 ap : 1; /* 44 ancillary report */ 55 u64 cr : 1; /* 29 control register validity */
55 __u32 : 1; /* 45 */ 56 u64 : 1; /* 30 */
56 __u32 ct : 1; /* 46 cpu timer validity */ 57 u64 st : 1; /* 31 storage logical validity */
57 __u32 cc : 1; /* 47 clock comparator validity */ 58 u64 ie : 1; /* 32 indirect storage error */
58 __u32 : 16; /* 47-63 */ 59 u64 ar : 1; /* 33 access register validity */
60 u64 da : 1; /* 34 delayed access exception */
61 u64 : 7; /* 35-41 */
62 u64 pr : 1; /* 42 tod programmable register validity */
63 u64 fc : 1; /* 43 fp control register validity */
64 u64 ap : 1; /* 44 ancillary report */
65 u64 : 1; /* 45 */
66 u64 ct : 1; /* 46 cpu timer validity */
67 u64 cc : 1; /* 47 clock comparator validity */
68 u64 : 16; /* 47-63 */
69 };
59}; 70};
60 71
61struct pt_regs; 72struct pt_regs;
@@ -63,4 +74,5 @@ struct pt_regs;
63extern void s390_handle_mcck(void); 74extern void s390_handle_mcck(void);
64extern void s390_do_machine_check(struct pt_regs *regs); 75extern void s390_do_machine_check(struct pt_regs *regs);
65 76
77#endif /* __ASSEMBLY__ */
66#endif /* _ASM_S390_NMI_H */ 78#endif /* _ASM_S390_NMI_H */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index bdb2f51124ed..024f85f947ae 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -193,9 +193,15 @@ static inline int is_module_addr(void *addr)
193#define _PAGE_UNUSED 0x080 /* SW bit for pgste usage state */ 193#define _PAGE_UNUSED 0x080 /* SW bit for pgste usage state */
194#define __HAVE_ARCH_PTE_SPECIAL 194#define __HAVE_ARCH_PTE_SPECIAL
195 195
196#ifdef CONFIG_MEM_SOFT_DIRTY
197#define _PAGE_SOFT_DIRTY 0x002 /* SW pte soft dirty bit */
198#else
199#define _PAGE_SOFT_DIRTY 0x000
200#endif
201
196/* Set of bits not changed in pte_modify */ 202/* Set of bits not changed in pte_modify */
197#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \ 203#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
198 _PAGE_YOUNG) 204 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)
199 205
200/* 206/*
201 * handle_pte_fault uses pte_present and pte_none to find out the pte type 207 * handle_pte_fault uses pte_present and pte_none to find out the pte type
@@ -285,6 +291,12 @@ static inline int is_module_addr(void *addr)
285#define _SEGMENT_ENTRY_READ 0x0002 /* SW segment read bit */ 291#define _SEGMENT_ENTRY_READ 0x0002 /* SW segment read bit */
286#define _SEGMENT_ENTRY_WRITE 0x0001 /* SW segment write bit */ 292#define _SEGMENT_ENTRY_WRITE 0x0001 /* SW segment write bit */
287 293
294#ifdef CONFIG_MEM_SOFT_DIRTY
295#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
296#else
297#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
298#endif
299
288/* 300/*
289 * Segment table entry encoding (R = read-only, I = invalid, y = young bit): 301 * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
290 * dy..R...I...wr 302 * dy..R...I...wr
@@ -589,6 +601,43 @@ static inline int pmd_protnone(pmd_t pmd)
589} 601}
590#endif 602#endif
591 603
604static inline int pte_soft_dirty(pte_t pte)
605{
606 return pte_val(pte) & _PAGE_SOFT_DIRTY;
607}
608#define pte_swp_soft_dirty pte_soft_dirty
609
610static inline pte_t pte_mksoft_dirty(pte_t pte)
611{
612 pte_val(pte) |= _PAGE_SOFT_DIRTY;
613 return pte;
614}
615#define pte_swp_mksoft_dirty pte_mksoft_dirty
616
617static inline pte_t pte_clear_soft_dirty(pte_t pte)
618{
619 pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
620 return pte;
621}
622#define pte_swp_clear_soft_dirty pte_clear_soft_dirty
623
624static inline int pmd_soft_dirty(pmd_t pmd)
625{
626 return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
627}
628
629static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
630{
631 pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
632 return pmd;
633}
634
635static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
636{
637 pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
638 return pmd;
639}
640
592static inline pgste_t pgste_get_lock(pte_t *ptep) 641static inline pgste_t pgste_get_lock(pte_t *ptep)
593{ 642{
594 unsigned long new = 0; 643 unsigned long new = 0;
@@ -889,7 +938,7 @@ static inline pte_t pte_mkclean(pte_t pte)
889 938
890static inline pte_t pte_mkdirty(pte_t pte) 939static inline pte_t pte_mkdirty(pte_t pte)
891{ 940{
892 pte_val(pte) |= _PAGE_DIRTY; 941 pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
893 if (pte_val(pte) & _PAGE_WRITE) 942 if (pte_val(pte) & _PAGE_WRITE)
894 pte_val(pte) &= ~_PAGE_PROTECT; 943 pte_val(pte) &= ~_PAGE_PROTECT;
895 return pte; 944 return pte;
@@ -1218,8 +1267,10 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
1218 pte_t entry, int dirty) 1267 pte_t entry, int dirty)
1219{ 1268{
1220 pgste_t pgste; 1269 pgste_t pgste;
1270 pte_t oldpte;
1221 1271
1222 if (pte_same(*ptep, entry)) 1272 oldpte = *ptep;
1273 if (pte_same(oldpte, entry))
1223 return 0; 1274 return 0;
1224 if (mm_has_pgste(vma->vm_mm)) { 1275 if (mm_has_pgste(vma->vm_mm)) {
1225 pgste = pgste_get_lock(ptep); 1276 pgste = pgste_get_lock(ptep);
@@ -1229,7 +1280,8 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
1229 ptep_flush_direct(vma->vm_mm, address, ptep); 1280 ptep_flush_direct(vma->vm_mm, address, ptep);
1230 1281
1231 if (mm_has_pgste(vma->vm_mm)) { 1282 if (mm_has_pgste(vma->vm_mm)) {
1232 pgste_set_key(ptep, pgste, entry, vma->vm_mm); 1283 if (pte_val(oldpte) & _PAGE_INVALID)
1284 pgste_set_key(ptep, pgste, entry, vma->vm_mm);
1233 pgste = pgste_set_pte(ptep, pgste, entry); 1285 pgste = pgste_set_pte(ptep, pgste, entry);
1234 pgste_set_unlock(ptep, pgste); 1286 pgste_set_unlock(ptep, pgste);
1235 } else 1287 } else
@@ -1340,7 +1392,8 @@ static inline pmd_t pmd_mkclean(pmd_t pmd)
1340static inline pmd_t pmd_mkdirty(pmd_t pmd) 1392static inline pmd_t pmd_mkdirty(pmd_t pmd)
1341{ 1393{
1342 if (pmd_large(pmd)) { 1394 if (pmd_large(pmd)) {
1343 pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY; 1395 pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
1396 _SEGMENT_ENTRY_SOFT_DIRTY;
1344 if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) 1397 if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
1345 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; 1398 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1346 } 1399 }
@@ -1371,7 +1424,8 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
1371 if (pmd_large(pmd)) { 1424 if (pmd_large(pmd)) {
1372 pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE | 1425 pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
1373 _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG | 1426 _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
1374 _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT; 1427 _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT |
1428 _SEGMENT_ENTRY_SOFT_DIRTY;
1375 pmd_val(pmd) |= massage_pgprot_pmd(newprot); 1429 pmd_val(pmd) |= massage_pgprot_pmd(newprot);
1376 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)) 1430 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
1377 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; 1431 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 085fb0d3c54e..b16c3d0a1b9f 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -11,15 +11,19 @@
11#ifndef __ASM_S390_PROCESSOR_H 11#ifndef __ASM_S390_PROCESSOR_H
12#define __ASM_S390_PROCESSOR_H 12#define __ASM_S390_PROCESSOR_H
13 13
14#include <linux/const.h>
15
14#define CIF_MCCK_PENDING 0 /* machine check handling is pending */ 16#define CIF_MCCK_PENDING 0 /* machine check handling is pending */
15#define CIF_ASCE 1 /* user asce needs fixup / uaccess */ 17#define CIF_ASCE 1 /* user asce needs fixup / uaccess */
16#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */ 18#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
17#define CIF_FPU 3 /* restore vector registers */ 19#define CIF_FPU 3 /* restore FPU registers */
20#define CIF_IGNORE_IRQ 4 /* ignore interrupt (for udelay) */
18 21
19#define _CIF_MCCK_PENDING (1<<CIF_MCCK_PENDING) 22#define _CIF_MCCK_PENDING _BITUL(CIF_MCCK_PENDING)
20#define _CIF_ASCE (1<<CIF_ASCE) 23#define _CIF_ASCE _BITUL(CIF_ASCE)
21#define _CIF_NOHZ_DELAY (1<<CIF_NOHZ_DELAY) 24#define _CIF_NOHZ_DELAY _BITUL(CIF_NOHZ_DELAY)
22#define _CIF_FPU (1<<CIF_FPU) 25#define _CIF_FPU _BITUL(CIF_FPU)
26#define _CIF_IGNORE_IRQ _BITUL(CIF_IGNORE_IRQ)
23 27
24#ifndef __ASSEMBLY__ 28#ifndef __ASSEMBLY__
25 29
@@ -30,21 +34,22 @@
30#include <asm/ptrace.h> 34#include <asm/ptrace.h>
31#include <asm/setup.h> 35#include <asm/setup.h>
32#include <asm/runtime_instr.h> 36#include <asm/runtime_instr.h>
33#include <asm/fpu-internal.h> 37#include <asm/fpu/types.h>
38#include <asm/fpu/internal.h>
34 39
35static inline void set_cpu_flag(int flag) 40static inline void set_cpu_flag(int flag)
36{ 41{
37 S390_lowcore.cpu_flags |= (1U << flag); 42 S390_lowcore.cpu_flags |= (1UL << flag);
38} 43}
39 44
40static inline void clear_cpu_flag(int flag) 45static inline void clear_cpu_flag(int flag)
41{ 46{
42 S390_lowcore.cpu_flags &= ~(1U << flag); 47 S390_lowcore.cpu_flags &= ~(1UL << flag);
43} 48}
44 49
45static inline int test_cpu_flag(int flag) 50static inline int test_cpu_flag(int flag)
46{ 51{
47 return !!(S390_lowcore.cpu_flags & (1U << flag)); 52 return !!(S390_lowcore.cpu_flags & (1UL << flag));
48} 53}
49 54
50#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY) 55#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
@@ -102,7 +107,6 @@ struct thread_struct {
102 struct list_head list; 107 struct list_head list;
103 /* cpu runtime instrumentation */ 108 /* cpu runtime instrumentation */
104 struct runtime_instr_cb *ri_cb; 109 struct runtime_instr_cb *ri_cb;
105 int ri_signum;
106 unsigned char trap_tdb[256]; /* Transaction abort diagnose block */ 110 unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
107}; 111};
108 112
@@ -139,8 +143,10 @@ struct stack_frame {
139 143
140#define ARCH_MIN_TASKALIGN 8 144#define ARCH_MIN_TASKALIGN 8
141 145
146extern __vector128 init_task_fpu_regs[__NUM_VXRS];
142#define INIT_THREAD { \ 147#define INIT_THREAD { \
143 .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \ 148 .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
149 .fpu.regs = (void *)&init_task_fpu_regs, \
144} 150}
145 151
146/* 152/*
@@ -217,7 +223,7 @@ static inline void __load_psw(psw_t psw)
217 * Set PSW mask to specified value, while leaving the 223 * Set PSW mask to specified value, while leaving the
218 * PSW addr pointing to the next instruction. 224 * PSW addr pointing to the next instruction.
219 */ 225 */
220static inline void __load_psw_mask (unsigned long mask) 226static inline void __load_psw_mask(unsigned long mask)
221{ 227{
222 unsigned long addr; 228 unsigned long addr;
223 psw_t psw; 229 psw_t psw;
@@ -243,6 +249,16 @@ static inline unsigned long __extract_psw(void)
243 return (((unsigned long) reg1) << 32) | ((unsigned long) reg2); 249 return (((unsigned long) reg1) << 32) | ((unsigned long) reg2);
244} 250}
245 251
252static inline void local_mcck_enable(void)
253{
254 __load_psw_mask(__extract_psw() | PSW_MASK_MCHECK);
255}
256
257static inline void local_mcck_disable(void)
258{
259 __load_psw_mask(__extract_psw() & ~PSW_MASK_MCHECK);
260}
261
246/* 262/*
247 * Rewind PSW instruction address by specified number of bytes. 263 * Rewind PSW instruction address by specified number of bytes.
248 */ 264 */
@@ -266,65 +282,14 @@ void enabled_wait(void);
266 */ 282 */
267static inline void __noreturn disabled_wait(unsigned long code) 283static inline void __noreturn disabled_wait(unsigned long code)
268{ 284{
269 unsigned long ctl_buf; 285 psw_t psw;
270 psw_t dw_psw;
271
272 dw_psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA;
273 dw_psw.addr = code;
274 /*
275 * Store status and then load disabled wait psw,
276 * the processor is dead afterwards
277 */
278 asm volatile(
279 " stctg 0,0,0(%2)\n"
280 " ni 4(%2),0xef\n" /* switch off protection */
281 " lctlg 0,0,0(%2)\n"
282 " lghi 1,0x1000\n"
283 " stpt 0x328(1)\n" /* store timer */
284 " stckc 0x330(1)\n" /* store clock comparator */
285 " stpx 0x318(1)\n" /* store prefix register */
286 " stam 0,15,0x340(1)\n"/* store access registers */
287 " stfpc 0x31c(1)\n" /* store fpu control */
288 " std 0,0x200(1)\n" /* store f0 */
289 " std 1,0x208(1)\n" /* store f1 */
290 " std 2,0x210(1)\n" /* store f2 */
291 " std 3,0x218(1)\n" /* store f3 */
292 " std 4,0x220(1)\n" /* store f4 */
293 " std 5,0x228(1)\n" /* store f5 */
294 " std 6,0x230(1)\n" /* store f6 */
295 " std 7,0x238(1)\n" /* store f7 */
296 " std 8,0x240(1)\n" /* store f8 */
297 " std 9,0x248(1)\n" /* store f9 */
298 " std 10,0x250(1)\n" /* store f10 */
299 " std 11,0x258(1)\n" /* store f11 */
300 " std 12,0x260(1)\n" /* store f12 */
301 " std 13,0x268(1)\n" /* store f13 */
302 " std 14,0x270(1)\n" /* store f14 */
303 " std 15,0x278(1)\n" /* store f15 */
304 " stmg 0,15,0x280(1)\n"/* store general registers */
305 " stctg 0,15,0x380(1)\n"/* store control registers */
306 " oi 0x384(1),0x10\n"/* fake protection bit */
307 " lpswe 0(%1)"
308 : "=m" (ctl_buf)
309 : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1");
310 while (1);
311}
312 286
313/* 287 psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA;
314 * Use to set psw mask except for the first byte which 288 psw.addr = code;
315 * won't be changed by this function. 289 __load_psw(psw);
316 */ 290 while (1);
317static inline void
318__set_psw_mask(unsigned long mask)
319{
320 __load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
321} 291}
322 292
323#define local_mcck_enable() \
324 __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK)
325#define local_mcck_disable() \
326 __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT)
327
328/* 293/*
329 * Basic Machine Check/Program Check Handler. 294 * Basic Machine Check/Program Check Handler.
330 */ 295 */
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 6feda2599282..37cbc50947f2 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -6,13 +6,14 @@
6#ifndef _S390_PTRACE_H 6#ifndef _S390_PTRACE_H
7#define _S390_PTRACE_H 7#define _S390_PTRACE_H
8 8
9#include <linux/const.h>
9#include <uapi/asm/ptrace.h> 10#include <uapi/asm/ptrace.h>
10 11
11#define PIF_SYSCALL 0 /* inside a system call */ 12#define PIF_SYSCALL 0 /* inside a system call */
12#define PIF_PER_TRAP 1 /* deliver sigtrap on return to user */ 13#define PIF_PER_TRAP 1 /* deliver sigtrap on return to user */
13 14
14#define _PIF_SYSCALL (1<<PIF_SYSCALL) 15#define _PIF_SYSCALL _BITUL(PIF_SYSCALL)
15#define _PIF_PER_TRAP (1<<PIF_PER_TRAP) 16#define _PIF_PER_TRAP _BITUL(PIF_PER_TRAP)
16 17
17#ifndef __ASSEMBLY__ 18#ifndef __ASSEMBLY__
18 19
@@ -128,17 +129,17 @@ struct per_struct_kernel {
128 129
129static inline void set_pt_regs_flag(struct pt_regs *regs, int flag) 130static inline void set_pt_regs_flag(struct pt_regs *regs, int flag)
130{ 131{
131 regs->flags |= (1U << flag); 132 regs->flags |= (1UL << flag);
132} 133}
133 134
134static inline void clear_pt_regs_flag(struct pt_regs *regs, int flag) 135static inline void clear_pt_regs_flag(struct pt_regs *regs, int flag)
135{ 136{
136 regs->flags &= ~(1U << flag); 137 regs->flags &= ~(1UL << flag);
137} 138}
138 139
139static inline int test_pt_regs_flag(struct pt_regs *regs, int flag) 140static inline int test_pt_regs_flag(struct pt_regs *regs, int flag)
140{ 141{
141 return !!(regs->flags & (1U << flag)); 142 return !!(regs->flags & (1UL << flag));
142} 143}
143 144
144/* 145/*
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index b8ffc1bd0a9f..23537661da0e 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -5,11 +5,38 @@
5#ifndef _ASM_S390_SETUP_H 5#ifndef _ASM_S390_SETUP_H
6#define _ASM_S390_SETUP_H 6#define _ASM_S390_SETUP_H
7 7
8#include <linux/const.h>
8#include <uapi/asm/setup.h> 9#include <uapi/asm/setup.h>
9 10
10 11
11#define PARMAREA 0x10400 12#define PARMAREA 0x10400
12 13
14/*
15 * Machine features detected in head.S
16 */
17
18#define MACHINE_FLAG_VM _BITUL(0)
19#define MACHINE_FLAG_IEEE _BITUL(1)
20#define MACHINE_FLAG_CSP _BITUL(2)
21#define MACHINE_FLAG_MVPG _BITUL(3)
22#define MACHINE_FLAG_DIAG44 _BITUL(4)
23#define MACHINE_FLAG_IDTE _BITUL(5)
24#define MACHINE_FLAG_DIAG9C _BITUL(6)
25#define MACHINE_FLAG_KVM _BITUL(8)
26#define MACHINE_FLAG_ESOP _BITUL(9)
27#define MACHINE_FLAG_EDAT1 _BITUL(10)
28#define MACHINE_FLAG_EDAT2 _BITUL(11)
29#define MACHINE_FLAG_LPAR _BITUL(12)
30#define MACHINE_FLAG_LPP _BITUL(13)
31#define MACHINE_FLAG_TOPOLOGY _BITUL(14)
32#define MACHINE_FLAG_TE _BITUL(15)
33#define MACHINE_FLAG_TLB_LC _BITUL(17)
34#define MACHINE_FLAG_VX _BITUL(18)
35#define MACHINE_FLAG_CAD _BITUL(19)
36
37#define LPP_MAGIC _BITUL(31)
38#define LPP_PFAULT_PID_MASK _AC(0xffffffff, UL)
39
13#ifndef __ASSEMBLY__ 40#ifndef __ASSEMBLY__
14 41
15#include <asm/lowcore.h> 42#include <asm/lowcore.h>
@@ -28,29 +55,6 @@ extern unsigned long max_physmem_end;
28 55
29extern void detect_memory_memblock(void); 56extern void detect_memory_memblock(void);
30 57
31/*
32 * Machine features detected in head.S
33 */
34
35#define MACHINE_FLAG_VM (1UL << 0)
36#define MACHINE_FLAG_IEEE (1UL << 1)
37#define MACHINE_FLAG_CSP (1UL << 2)
38#define MACHINE_FLAG_MVPG (1UL << 3)
39#define MACHINE_FLAG_DIAG44 (1UL << 4)
40#define MACHINE_FLAG_IDTE (1UL << 5)
41#define MACHINE_FLAG_DIAG9C (1UL << 6)
42#define MACHINE_FLAG_KVM (1UL << 8)
43#define MACHINE_FLAG_ESOP (1UL << 9)
44#define MACHINE_FLAG_EDAT1 (1UL << 10)
45#define MACHINE_FLAG_EDAT2 (1UL << 11)
46#define MACHINE_FLAG_LPAR (1UL << 12)
47#define MACHINE_FLAG_LPP (1UL << 13)
48#define MACHINE_FLAG_TOPOLOGY (1UL << 14)
49#define MACHINE_FLAG_TE (1UL << 15)
50#define MACHINE_FLAG_TLB_LC (1UL << 17)
51#define MACHINE_FLAG_VX (1UL << 18)
52#define MACHINE_FLAG_CAD (1UL << 19)
53
54#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) 58#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
55#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) 59#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
56#define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR) 60#define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR)
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 0e37cd041241..63ebf37d3143 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -87,7 +87,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
87{ 87{
88 typecheck(unsigned int, lp->lock); 88 typecheck(unsigned int, lp->lock);
89 asm volatile( 89 asm volatile(
90 __ASM_BARRIER
91 "st %1,%0\n" 90 "st %1,%0\n"
92 : "+Q" (lp->lock) 91 : "+Q" (lp->lock)
93 : "d" (0) 92 : "d" (0)
@@ -169,7 +168,6 @@ static inline int arch_write_trylock_once(arch_rwlock_t *rw)
169 \ 168 \
170 typecheck(unsigned int *, ptr); \ 169 typecheck(unsigned int *, ptr); \
171 asm volatile( \ 170 asm volatile( \
172 "bcr 14,0\n" \
173 op_string " %0,%2,%1\n" \ 171 op_string " %0,%2,%1\n" \
174 : "=d" (old_val), "+Q" (*ptr) \ 172 : "=d" (old_val), "+Q" (*ptr) \
175 : "d" (op_val) \ 173 : "d" (op_val) \
@@ -243,7 +241,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
243 241
244 rw->owner = 0; 242 rw->owner = 0;
245 asm volatile( 243 asm volatile(
246 __ASM_BARRIER
247 "st %1,%0\n" 244 "st %1,%0\n"
248 : "+Q" (rw->lock) 245 : "+Q" (rw->lock)
249 : "d" (0) 246 : "d" (0)
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index dcadfde32265..12d45f0cfdd9 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -8,7 +8,7 @@
8#define __ASM_SWITCH_TO_H 8#define __ASM_SWITCH_TO_H
9 9
10#include <linux/thread_info.h> 10#include <linux/thread_info.h>
11#include <asm/fpu-internal.h> 11#include <asm/fpu/api.h>
12#include <asm/ptrace.h> 12#include <asm/ptrace.h>
13 13
14extern struct task_struct *__switch_to(void *, void *); 14extern struct task_struct *__switch_to(void *, void *);
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 4c27ec764c36..692b9247c019 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -7,6 +7,8 @@
7#ifndef _ASM_THREAD_INFO_H 7#ifndef _ASM_THREAD_INFO_H
8#define _ASM_THREAD_INFO_H 8#define _ASM_THREAD_INFO_H
9 9
10#include <linux/const.h>
11
10/* 12/*
11 * Size of kernel stack for each process 13 * Size of kernel stack for each process
12 */ 14 */
@@ -83,16 +85,16 @@ void arch_release_task_struct(struct task_struct *tsk);
83#define TIF_BLOCK_STEP 20 /* This task is block stepped */ 85#define TIF_BLOCK_STEP 20 /* This task is block stepped */
84#define TIF_UPROBE_SINGLESTEP 21 /* This task is uprobe single stepped */ 86#define TIF_UPROBE_SINGLESTEP 21 /* This task is uprobe single stepped */
85 87
86#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 88#define _TIF_NOTIFY_RESUME _BITUL(TIF_NOTIFY_RESUME)
87#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) 89#define _TIF_SIGPENDING _BITUL(TIF_SIGPENDING)
88#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 90#define _TIF_NEED_RESCHED _BITUL(TIF_NEED_RESCHED)
89#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 91#define _TIF_SYSCALL_TRACE _BITUL(TIF_SYSCALL_TRACE)
90#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 92#define _TIF_SYSCALL_AUDIT _BITUL(TIF_SYSCALL_AUDIT)
91#define _TIF_SECCOMP (1<<TIF_SECCOMP) 93#define _TIF_SECCOMP _BITUL(TIF_SECCOMP)
92#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) 94#define _TIF_SYSCALL_TRACEPOINT _BITUL(TIF_SYSCALL_TRACEPOINT)
93#define _TIF_UPROBE (1<<TIF_UPROBE) 95#define _TIF_UPROBE _BITUL(TIF_UPROBE)
94#define _TIF_31BIT (1<<TIF_31BIT) 96#define _TIF_31BIT _BITUL(TIF_31BIT)
95#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) 97#define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP)
96 98
97#define is_32bit_task() (test_thread_flag(TIF_31BIT)) 99#define is_32bit_task() (test_thread_flag(TIF_31BIT))
98 100
diff --git a/arch/s390/include/asm/trace/diag.h b/arch/s390/include/asm/trace/diag.h
new file mode 100644
index 000000000000..776f307960cc
--- /dev/null
+++ b/arch/s390/include/asm/trace/diag.h
@@ -0,0 +1,43 @@
1/*
2 * Tracepoint header for s390 diagnose calls
3 *
4 * Copyright IBM Corp. 2015
5 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
6 */
7
8#undef TRACE_SYSTEM
9#define TRACE_SYSTEM s390
10
11#if !defined(_TRACE_S390_DIAG_H) || defined(TRACE_HEADER_MULTI_READ)
12#define _TRACE_S390_DIAG_H
13
14#include <linux/tracepoint.h>
15
16#undef TRACE_INCLUDE_PATH
17#undef TRACE_INCLUDE_FILE
18
19#define TRACE_INCLUDE_PATH asm/trace
20#define TRACE_INCLUDE_FILE diag
21
22TRACE_EVENT(diagnose,
23 TP_PROTO(unsigned short nr),
24 TP_ARGS(nr),
25 TP_STRUCT__entry(
26 __field(unsigned short, nr)
27 ),
28 TP_fast_assign(
29 __entry->nr = nr;
30 ),
31 TP_printk("nr=0x%x", __entry->nr)
32);
33
34#ifdef CONFIG_TRACEPOINTS
35void trace_diagnose_norecursion(int diag_nr);
36#else
37static inline void trace_diagnose_norecursion(int diag_nr) { }
38#endif
39
40#endif /* _TRACE_S390_DIAG_H */
41
42/* This part must be outside protection */
43#include <trace/define_trace.h>
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index b756c6348ac6..dc167a23b920 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -66,6 +66,8 @@ obj-$(CONFIG_UPROBES) += uprobes.o
66obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o 66obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o
67obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o 67obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o
68 68
69obj-$(CONFIG_TRACEPOINTS) += trace.o
70
69# vdso 71# vdso
70obj-y += vdso64/ 72obj-y += vdso64/
71obj-$(CONFIG_COMPAT) += vdso32/ 73obj-$(CONFIG_COMPAT) += vdso32/
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 3aeeb1b562c0..9cd248f637c7 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -23,59 +23,64 @@
23 23
24int main(void) 24int main(void)
25{ 25{
26 DEFINE(__TASK_thread_info, offsetof(struct task_struct, stack)); 26 /* task struct offsets */
27 DEFINE(__TASK_thread, offsetof(struct task_struct, thread)); 27 OFFSET(__TASK_thread_info, task_struct, stack);
28 DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); 28 OFFSET(__TASK_thread, task_struct, thread);
29 OFFSET(__TASK_pid, task_struct, pid);
29 BLANK(); 30 BLANK();
30 DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp)); 31 /* thread struct offsets */
31 DEFINE(__THREAD_FPU_fpc, offsetof(struct thread_struct, fpu.fpc)); 32 OFFSET(__THREAD_ksp, thread_struct, ksp);
32 DEFINE(__THREAD_FPU_flags, offsetof(struct thread_struct, fpu.flags)); 33 OFFSET(__THREAD_FPU_fpc, thread_struct, fpu.fpc);
33 DEFINE(__THREAD_FPU_regs, offsetof(struct thread_struct, fpu.regs)); 34 OFFSET(__THREAD_FPU_regs, thread_struct, fpu.regs);
34 DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause)); 35 OFFSET(__THREAD_per_cause, thread_struct, per_event.cause);
35 DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address)); 36 OFFSET(__THREAD_per_address, thread_struct, per_event.address);
36 DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid)); 37 OFFSET(__THREAD_per_paid, thread_struct, per_event.paid);
37 DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb)); 38 OFFSET(__THREAD_trap_tdb, thread_struct, trap_tdb);
38 BLANK(); 39 BLANK();
39 DEFINE(__TI_task, offsetof(struct thread_info, task)); 40 /* thread info offsets */
40 DEFINE(__TI_flags, offsetof(struct thread_info, flags)); 41 OFFSET(__TI_task, thread_info, task);
41 DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table)); 42 OFFSET(__TI_flags, thread_info, flags);
42 DEFINE(__TI_cpu, offsetof(struct thread_info, cpu)); 43 OFFSET(__TI_sysc_table, thread_info, sys_call_table);
43 DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count)); 44 OFFSET(__TI_cpu, thread_info, cpu);
44 DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer)); 45 OFFSET(__TI_precount, thread_info, preempt_count);
45 DEFINE(__TI_system_timer, offsetof(struct thread_info, system_timer)); 46 OFFSET(__TI_user_timer, thread_info, user_timer);
46 DEFINE(__TI_last_break, offsetof(struct thread_info, last_break)); 47 OFFSET(__TI_system_timer, thread_info, system_timer);
48 OFFSET(__TI_last_break, thread_info, last_break);
47 BLANK(); 49 BLANK();
48 DEFINE(__PT_ARGS, offsetof(struct pt_regs, args)); 50 /* pt_regs offsets */
49 DEFINE(__PT_PSW, offsetof(struct pt_regs, psw)); 51 OFFSET(__PT_ARGS, pt_regs, args);
50 DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs)); 52 OFFSET(__PT_PSW, pt_regs, psw);
51 DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2)); 53 OFFSET(__PT_GPRS, pt_regs, gprs);
52 DEFINE(__PT_INT_CODE, offsetof(struct pt_regs, int_code)); 54 OFFSET(__PT_ORIG_GPR2, pt_regs, orig_gpr2);
53 DEFINE(__PT_INT_PARM, offsetof(struct pt_regs, int_parm)); 55 OFFSET(__PT_INT_CODE, pt_regs, int_code);
54 DEFINE(__PT_INT_PARM_LONG, offsetof(struct pt_regs, int_parm_long)); 56 OFFSET(__PT_INT_PARM, pt_regs, int_parm);
55 DEFINE(__PT_FLAGS, offsetof(struct pt_regs, flags)); 57 OFFSET(__PT_INT_PARM_LONG, pt_regs, int_parm_long);
58 OFFSET(__PT_FLAGS, pt_regs, flags);
56 DEFINE(__PT_SIZE, sizeof(struct pt_regs)); 59 DEFINE(__PT_SIZE, sizeof(struct pt_regs));
57 BLANK(); 60 BLANK();
58 DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain)); 61 /* stack_frame offsets */
59 DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs)); 62 OFFSET(__SF_BACKCHAIN, stack_frame, back_chain);
60 DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1)); 63 OFFSET(__SF_GPRS, stack_frame, gprs);
64 OFFSET(__SF_EMPTY, stack_frame, empty1);
61 BLANK(); 65 BLANK();
62 /* timeval/timezone offsets for use by vdso */ 66 /* timeval/timezone offsets for use by vdso */
63 DEFINE(__VDSO_UPD_COUNT, offsetof(struct vdso_data, tb_update_count)); 67 OFFSET(__VDSO_UPD_COUNT, vdso_data, tb_update_count);
64 DEFINE(__VDSO_XTIME_STAMP, offsetof(struct vdso_data, xtime_tod_stamp)); 68 OFFSET(__VDSO_XTIME_STAMP, vdso_data, xtime_tod_stamp);
65 DEFINE(__VDSO_XTIME_SEC, offsetof(struct vdso_data, xtime_clock_sec)); 69 OFFSET(__VDSO_XTIME_SEC, vdso_data, xtime_clock_sec);
66 DEFINE(__VDSO_XTIME_NSEC, offsetof(struct vdso_data, xtime_clock_nsec)); 70 OFFSET(__VDSO_XTIME_NSEC, vdso_data, xtime_clock_nsec);
67 DEFINE(__VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec)); 71 OFFSET(__VDSO_XTIME_CRS_SEC, vdso_data, xtime_coarse_sec);
68 DEFINE(__VDSO_XTIME_CRS_NSEC, offsetof(struct vdso_data, xtime_coarse_nsec)); 72 OFFSET(__VDSO_XTIME_CRS_NSEC, vdso_data, xtime_coarse_nsec);
69 DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec)); 73 OFFSET(__VDSO_WTOM_SEC, vdso_data, wtom_clock_sec);
70 DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); 74 OFFSET(__VDSO_WTOM_NSEC, vdso_data, wtom_clock_nsec);
71 DEFINE(__VDSO_WTOM_CRS_SEC, offsetof(struct vdso_data, wtom_coarse_sec)); 75 OFFSET(__VDSO_WTOM_CRS_SEC, vdso_data, wtom_coarse_sec);
72 DEFINE(__VDSO_WTOM_CRS_NSEC, offsetof(struct vdso_data, wtom_coarse_nsec)); 76 OFFSET(__VDSO_WTOM_CRS_NSEC, vdso_data, wtom_coarse_nsec);
73 DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); 77 OFFSET(__VDSO_TIMEZONE, vdso_data, tz_minuteswest);
74 DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available)); 78 OFFSET(__VDSO_ECTG_OK, vdso_data, ectg_available);
75 DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult)); 79 OFFSET(__VDSO_TK_MULT, vdso_data, tk_mult);
76 DEFINE(__VDSO_TK_SHIFT, offsetof(struct vdso_data, tk_shift)); 80 OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift);
77 DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base)); 81 OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base);
78 DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time)); 82 OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time);
83 BLANK();
79 /* constants used by the vdso */ 84 /* constants used by the vdso */
80 DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME); 85 DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
81 DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC); 86 DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
@@ -86,102 +91,105 @@ int main(void)
86 DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC); 91 DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC);
87 BLANK(); 92 BLANK();
88 /* idle data offsets */ 93 /* idle data offsets */
89 DEFINE(__CLOCK_IDLE_ENTER, offsetof(struct s390_idle_data, clock_idle_enter)); 94 OFFSET(__CLOCK_IDLE_ENTER, s390_idle_data, clock_idle_enter);
90 DEFINE(__CLOCK_IDLE_EXIT, offsetof(struct s390_idle_data, clock_idle_exit)); 95 OFFSET(__CLOCK_IDLE_EXIT, s390_idle_data, clock_idle_exit);
91 DEFINE(__TIMER_IDLE_ENTER, offsetof(struct s390_idle_data, timer_idle_enter)); 96 OFFSET(__TIMER_IDLE_ENTER, s390_idle_data, timer_idle_enter);
92 DEFINE(__TIMER_IDLE_EXIT, offsetof(struct s390_idle_data, timer_idle_exit)); 97 OFFSET(__TIMER_IDLE_EXIT, s390_idle_data, timer_idle_exit);
93 /* lowcore offsets */
94 DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params));
95 DEFINE(__LC_EXT_CPU_ADDR, offsetof(struct _lowcore, ext_cpu_addr));
96 DEFINE(__LC_EXT_INT_CODE, offsetof(struct _lowcore, ext_int_code));
97 DEFINE(__LC_SVC_ILC, offsetof(struct _lowcore, svc_ilc));
98 DEFINE(__LC_SVC_INT_CODE, offsetof(struct _lowcore, svc_code));
99 DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc));
100 DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code));
101 DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code));
102 DEFINE(__LC_MON_CLASS_NR, offsetof(struct _lowcore, mon_class_num));
103 DEFINE(__LC_PER_CODE, offsetof(struct _lowcore, per_code));
104 DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_atmid));
105 DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address));
106 DEFINE(__LC_EXC_ACCESS_ID, offsetof(struct _lowcore, exc_access_id));
107 DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id));
108 DEFINE(__LC_OP_ACCESS_ID, offsetof(struct _lowcore, op_access_id));
109 DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_mode_id));
110 DEFINE(__LC_MON_CODE, offsetof(struct _lowcore, monitor_code));
111 DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id));
112 DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr));
113 DEFINE(__LC_IO_INT_PARM, offsetof(struct _lowcore, io_int_parm));
114 DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word));
115 DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list));
116 DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code));
117 DEFINE(__LC_MCCK_EXT_DAM_CODE, offsetof(struct _lowcore, external_damage_code));
118 DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw));
119 DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw));
120 DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw));
121 DEFINE(__LC_PGM_OLD_PSW, offsetof(struct _lowcore, program_old_psw));
122 DEFINE(__LC_MCK_OLD_PSW, offsetof(struct _lowcore, mcck_old_psw));
123 DEFINE(__LC_IO_OLD_PSW, offsetof(struct _lowcore, io_old_psw));
124 DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw));
125 DEFINE(__LC_EXT_NEW_PSW, offsetof(struct _lowcore, external_new_psw));
126 DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw));
127 DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw));
128 DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw));
129 DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw));
130 BLANK(); 98 BLANK();
131 DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync)); 99 /* hardware defined lowcore locations 0x000 - 0x1ff */
132 DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async)); 100 OFFSET(__LC_EXT_PARAMS, _lowcore, ext_params);
133 DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart)); 101 OFFSET(__LC_EXT_CPU_ADDR, _lowcore, ext_cpu_addr);
134 DEFINE(__LC_CPU_FLAGS, offsetof(struct _lowcore, cpu_flags)); 102 OFFSET(__LC_EXT_INT_CODE, _lowcore, ext_int_code);
135 DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw)); 103 OFFSET(__LC_SVC_ILC, _lowcore, svc_ilc);
136 DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw)); 104 OFFSET(__LC_SVC_INT_CODE, _lowcore, svc_code);
137 DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer)); 105 OFFSET(__LC_PGM_ILC, _lowcore, pgm_ilc);
138 DEFINE(__LC_ASYNC_ENTER_TIMER, offsetof(struct _lowcore, async_enter_timer)); 106 OFFSET(__LC_PGM_INT_CODE, _lowcore, pgm_code);
139 DEFINE(__LC_MCCK_ENTER_TIMER, offsetof(struct _lowcore, mcck_enter_timer)); 107 OFFSET(__LC_DATA_EXC_CODE, _lowcore, data_exc_code);
140 DEFINE(__LC_EXIT_TIMER, offsetof(struct _lowcore, exit_timer)); 108 OFFSET(__LC_MON_CLASS_NR, _lowcore, mon_class_num);
141 DEFINE(__LC_USER_TIMER, offsetof(struct _lowcore, user_timer)); 109 OFFSET(__LC_PER_CODE, _lowcore, per_code);
142 DEFINE(__LC_SYSTEM_TIMER, offsetof(struct _lowcore, system_timer)); 110 OFFSET(__LC_PER_ATMID, _lowcore, per_atmid);
143 DEFINE(__LC_STEAL_TIMER, offsetof(struct _lowcore, steal_timer)); 111 OFFSET(__LC_PER_ADDRESS, _lowcore, per_address);
144 DEFINE(__LC_LAST_UPDATE_TIMER, offsetof(struct _lowcore, last_update_timer)); 112 OFFSET(__LC_EXC_ACCESS_ID, _lowcore, exc_access_id);
145 DEFINE(__LC_LAST_UPDATE_CLOCK, offsetof(struct _lowcore, last_update_clock)); 113 OFFSET(__LC_PER_ACCESS_ID, _lowcore, per_access_id);
146 DEFINE(__LC_CURRENT, offsetof(struct _lowcore, current_task)); 114 OFFSET(__LC_OP_ACCESS_ID, _lowcore, op_access_id);
147 DEFINE(__LC_CURRENT_PID, offsetof(struct _lowcore, current_pid)); 115 OFFSET(__LC_AR_MODE_ID, _lowcore, ar_mode_id);
148 DEFINE(__LC_THREAD_INFO, offsetof(struct _lowcore, thread_info)); 116 OFFSET(__LC_TRANS_EXC_CODE, _lowcore, trans_exc_code);
149 DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack)); 117 OFFSET(__LC_MON_CODE, _lowcore, monitor_code);
150 DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack)); 118 OFFSET(__LC_SUBCHANNEL_ID, _lowcore, subchannel_id);
151 DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack)); 119 OFFSET(__LC_SUBCHANNEL_NR, _lowcore, subchannel_nr);
152 DEFINE(__LC_RESTART_STACK, offsetof(struct _lowcore, restart_stack)); 120 OFFSET(__LC_IO_INT_PARM, _lowcore, io_int_parm);
153 DEFINE(__LC_RESTART_FN, offsetof(struct _lowcore, restart_fn)); 121 OFFSET(__LC_IO_INT_WORD, _lowcore, io_int_word);
154 DEFINE(__LC_RESTART_DATA, offsetof(struct _lowcore, restart_data)); 122 OFFSET(__LC_STFL_FAC_LIST, _lowcore, stfl_fac_list);
155 DEFINE(__LC_RESTART_SOURCE, offsetof(struct _lowcore, restart_source)); 123 OFFSET(__LC_MCCK_CODE, _lowcore, mcck_interruption_code);
156 DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce)); 124 OFFSET(__LC_MCCK_FAIL_STOR_ADDR, _lowcore, failing_storage_address);
157 DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce)); 125 OFFSET(__LC_LAST_BREAK, _lowcore, breaking_event_addr);
158 DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock)); 126 OFFSET(__LC_RST_OLD_PSW, _lowcore, restart_old_psw);
159 DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock)); 127 OFFSET(__LC_EXT_OLD_PSW, _lowcore, external_old_psw);
160 DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags)); 128 OFFSET(__LC_SVC_OLD_PSW, _lowcore, svc_old_psw);
161 DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib)); 129 OFFSET(__LC_PGM_OLD_PSW, _lowcore, program_old_psw);
130 OFFSET(__LC_MCK_OLD_PSW, _lowcore, mcck_old_psw);
131 OFFSET(__LC_IO_OLD_PSW, _lowcore, io_old_psw);
132 OFFSET(__LC_RST_NEW_PSW, _lowcore, restart_psw);
133 OFFSET(__LC_EXT_NEW_PSW, _lowcore, external_new_psw);
134 OFFSET(__LC_SVC_NEW_PSW, _lowcore, svc_new_psw);
135 OFFSET(__LC_PGM_NEW_PSW, _lowcore, program_new_psw);
136 OFFSET(__LC_MCK_NEW_PSW, _lowcore, mcck_new_psw);
137 OFFSET(__LC_IO_NEW_PSW, _lowcore, io_new_psw);
138 /* software defined lowcore locations 0x200 - 0xdff*/
139 OFFSET(__LC_SAVE_AREA_SYNC, _lowcore, save_area_sync);
140 OFFSET(__LC_SAVE_AREA_ASYNC, _lowcore, save_area_async);
141 OFFSET(__LC_SAVE_AREA_RESTART, _lowcore, save_area_restart);
142 OFFSET(__LC_CPU_FLAGS, _lowcore, cpu_flags);
143 OFFSET(__LC_RETURN_PSW, _lowcore, return_psw);
144 OFFSET(__LC_RETURN_MCCK_PSW, _lowcore, return_mcck_psw);
145 OFFSET(__LC_SYNC_ENTER_TIMER, _lowcore, sync_enter_timer);
146 OFFSET(__LC_ASYNC_ENTER_TIMER, _lowcore, async_enter_timer);
147 OFFSET(__LC_MCCK_ENTER_TIMER, _lowcore, mcck_enter_timer);
148 OFFSET(__LC_EXIT_TIMER, _lowcore, exit_timer);
149 OFFSET(__LC_USER_TIMER, _lowcore, user_timer);
150 OFFSET(__LC_SYSTEM_TIMER, _lowcore, system_timer);
151 OFFSET(__LC_STEAL_TIMER, _lowcore, steal_timer);
152 OFFSET(__LC_LAST_UPDATE_TIMER, _lowcore, last_update_timer);
153 OFFSET(__LC_LAST_UPDATE_CLOCK, _lowcore, last_update_clock);
154 OFFSET(__LC_INT_CLOCK, _lowcore, int_clock);
155 OFFSET(__LC_MCCK_CLOCK, _lowcore, mcck_clock);
156 OFFSET(__LC_CURRENT, _lowcore, current_task);
157 OFFSET(__LC_THREAD_INFO, _lowcore, thread_info);
158 OFFSET(__LC_KERNEL_STACK, _lowcore, kernel_stack);
159 OFFSET(__LC_ASYNC_STACK, _lowcore, async_stack);
160 OFFSET(__LC_PANIC_STACK, _lowcore, panic_stack);
161 OFFSET(__LC_RESTART_STACK, _lowcore, restart_stack);
162 OFFSET(__LC_RESTART_FN, _lowcore, restart_fn);
163 OFFSET(__LC_RESTART_DATA, _lowcore, restart_data);
164 OFFSET(__LC_RESTART_SOURCE, _lowcore, restart_source);
165 OFFSET(__LC_USER_ASCE, _lowcore, user_asce);
166 OFFSET(__LC_LPP, _lowcore, lpp);
167 OFFSET(__LC_CURRENT_PID, _lowcore, current_pid);
168 OFFSET(__LC_PERCPU_OFFSET, _lowcore, percpu_offset);
169 OFFSET(__LC_VDSO_PER_CPU, _lowcore, vdso_per_cpu_data);
170 OFFSET(__LC_MACHINE_FLAGS, _lowcore, machine_flags);
171 OFFSET(__LC_GMAP, _lowcore, gmap);
172 OFFSET(__LC_PASTE, _lowcore, paste);
173 /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
174 OFFSET(__LC_DUMP_REIPL, _lowcore, ipib);
175 /* hardware defined lowcore locations 0x1000 - 0x18ff */
176 OFFSET(__LC_VX_SAVE_AREA_ADDR, _lowcore, vector_save_area_addr);
177 OFFSET(__LC_EXT_PARAMS2, _lowcore, ext_params2);
178 OFFSET(SAVE_AREA_BASE, _lowcore, floating_pt_save_area);
179 OFFSET(__LC_FPREGS_SAVE_AREA, _lowcore, floating_pt_save_area);
180 OFFSET(__LC_GPREGS_SAVE_AREA, _lowcore, gpregs_save_area);
181 OFFSET(__LC_PSW_SAVE_AREA, _lowcore, psw_save_area);
182 OFFSET(__LC_PREFIX_SAVE_AREA, _lowcore, prefixreg_save_area);
183 OFFSET(__LC_FP_CREG_SAVE_AREA, _lowcore, fpt_creg_save_area);
184 OFFSET(__LC_CPU_TIMER_SAVE_AREA, _lowcore, cpu_timer_save_area);
185 OFFSET(__LC_CLOCK_COMP_SAVE_AREA, _lowcore, clock_comp_save_area);
186 OFFSET(__LC_AREGS_SAVE_AREA, _lowcore, access_regs_save_area);
187 OFFSET(__LC_CREGS_SAVE_AREA, _lowcore, cregs_save_area);
188 OFFSET(__LC_PGM_TDB, _lowcore, pgm_tdb);
162 BLANK(); 189 BLANK();
163 DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area)); 190 /* gmap/sie offsets */
164 DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area)); 191 OFFSET(__GMAP_ASCE, gmap, asce);
165 DEFINE(__LC_PSW_SAVE_AREA, offsetof(struct _lowcore, psw_save_area)); 192 OFFSET(__SIE_PROG0C, kvm_s390_sie_block, prog0c);
166 DEFINE(__LC_PREFIX_SAVE_AREA, offsetof(struct _lowcore, prefixreg_save_area)); 193 OFFSET(__SIE_PROG20, kvm_s390_sie_block, prog20);
167 DEFINE(__LC_AREGS_SAVE_AREA, offsetof(struct _lowcore, access_regs_save_area));
168 DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area));
169 DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
170 DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
171 DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code));
172 DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address));
173 DEFINE(__LC_VX_SAVE_AREA_ADDR, offsetof(struct _lowcore, vector_save_area_addr));
174 DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
175 DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area));
176 DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
177 DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
178 DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
179 DEFINE(__LC_PERCPU_OFFSET, offsetof(struct _lowcore, percpu_offset));
180 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
181 DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
182 DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
183 DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
184 DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c));
185 DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
186 return 0; 194 return 0;
187} 195}
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index e0f9d270b30f..66c94417c0ba 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -249,7 +249,7 @@ static int save_sigregs_ext32(struct pt_regs *regs,
249 return -EFAULT; 249 return -EFAULT;
250 250
251 /* Save vector registers to signal stack */ 251 /* Save vector registers to signal stack */
252 if (is_vx_task(current)) { 252 if (MACHINE_HAS_VX) {
253 for (i = 0; i < __NUM_VXRS_LOW; i++) 253 for (i = 0; i < __NUM_VXRS_LOW; i++)
254 vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1); 254 vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1);
255 if (__copy_to_user(&sregs_ext->vxrs_low, vxrs, 255 if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
@@ -277,7 +277,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
277 *(__u32 *)&regs->gprs[i] = gprs_high[i]; 277 *(__u32 *)&regs->gprs[i] = gprs_high[i];
278 278
279 /* Restore vector registers from signal stack */ 279 /* Restore vector registers from signal stack */
280 if (is_vx_task(current)) { 280 if (MACHINE_HAS_VX) {
281 if (__copy_from_user(vxrs, &sregs_ext->vxrs_low, 281 if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
282 sizeof(sregs_ext->vxrs_low)) || 282 sizeof(sregs_ext->vxrs_low)) ||
283 __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW, 283 __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
@@ -470,8 +470,7 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
470 */ 470 */
471 uc_flags = UC_GPRS_HIGH; 471 uc_flags = UC_GPRS_HIGH;
472 if (MACHINE_HAS_VX) { 472 if (MACHINE_HAS_VX) {
473 if (is_vx_task(current)) 473 uc_flags |= UC_VXRS;
474 uc_flags |= UC_VXRS;
475 } else 474 } else
476 frame_size -= sizeof(frame->uc.uc_mcontext_ext.vxrs_low) + 475 frame_size -= sizeof(frame->uc.uc_mcontext_ext.vxrs_low) +
477 sizeof(frame->uc.uc_mcontext_ext.vxrs_high); 476 sizeof(frame->uc.uc_mcontext_ext.vxrs_high);
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index 199ec92ef4fe..7f768914fb4f 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -14,6 +14,7 @@
14#include <linux/spinlock.h> 14#include <linux/spinlock.h>
15#include <linux/stddef.h> 15#include <linux/stddef.h>
16#include <linux/string.h> 16#include <linux/string.h>
17#include <asm/diag.h>
17#include <asm/ebcdic.h> 18#include <asm/ebcdic.h>
18#include <asm/cpcmd.h> 19#include <asm/cpcmd.h>
19#include <asm/io.h> 20#include <asm/io.h>
@@ -70,6 +71,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
70 memcpy(cpcmd_buf, cmd, cmdlen); 71 memcpy(cpcmd_buf, cmd, cmdlen);
71 ASCEBC(cpcmd_buf, cmdlen); 72 ASCEBC(cpcmd_buf, cmdlen);
72 73
74 diag_stat_inc(DIAG_STAT_X008);
73 if (response) { 75 if (response) {
74 memset(response, 0, rlen); 76 memset(response, 0, rlen);
75 response_len = rlen; 77 response_len = rlen;
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 0c6c01eb3613..171e09bb8ea2 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -32,16 +32,6 @@ static struct memblock_type oldmem_type = {
32 .regions = &oldmem_region, 32 .regions = &oldmem_region,
33}; 33};
34 34
35#define for_each_dump_mem_range(i, nid, p_start, p_end, p_nid) \
36 for (i = 0, __next_mem_range(&i, nid, MEMBLOCK_NONE, \
37 &memblock.physmem, \
38 &oldmem_type, p_start, \
39 p_end, p_nid); \
40 i != (u64)ULLONG_MAX; \
41 __next_mem_range(&i, nid, MEMBLOCK_NONE, &memblock.physmem,\
42 &oldmem_type, \
43 p_start, p_end, p_nid))
44
45struct dump_save_areas dump_save_areas; 35struct dump_save_areas dump_save_areas;
46 36
47/* 37/*
@@ -515,7 +505,8 @@ static int get_mem_chunk_cnt(void)
515 int cnt = 0; 505 int cnt = 0;
516 u64 idx; 506 u64 idx;
517 507
518 for_each_dump_mem_range(idx, NUMA_NO_NODE, NULL, NULL, NULL) 508 for_each_mem_range(idx, &memblock.physmem, &oldmem_type, NUMA_NO_NODE,
509 MEMBLOCK_NONE, NULL, NULL, NULL)
519 cnt++; 510 cnt++;
520 return cnt; 511 return cnt;
521} 512}
@@ -528,7 +519,8 @@ static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
528 phys_addr_t start, end; 519 phys_addr_t start, end;
529 u64 idx; 520 u64 idx;
530 521
531 for_each_dump_mem_range(idx, NUMA_NO_NODE, &start, &end, NULL) { 522 for_each_mem_range(idx, &memblock.physmem, &oldmem_type, NUMA_NO_NODE,
523 MEMBLOCK_NONE, &start, &end, NULL) {
532 phdr->p_filesz = end - start; 524 phdr->p_filesz = end - start;
533 phdr->p_type = PT_LOAD; 525 phdr->p_type = PT_LOAD;
534 phdr->p_offset = start; 526 phdr->p_offset = start;
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index 2f69243bf700..f98766ede4e1 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -6,12 +6,137 @@
6 */ 6 */
7 7
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/cpu.h>
10#include <linux/seq_file.h>
11#include <linux/debugfs.h>
9#include <asm/diag.h> 12#include <asm/diag.h>
13#include <asm/trace/diag.h>
14
15struct diag_stat {
16 unsigned int counter[NR_DIAG_STAT];
17};
18
19static DEFINE_PER_CPU(struct diag_stat, diag_stat);
20
21struct diag_desc {
22 int code;
23 char *name;
24};
25
26static const struct diag_desc diag_map[NR_DIAG_STAT] = {
27 [DIAG_STAT_X008] = { .code = 0x008, .name = "Console Function" },
28 [DIAG_STAT_X00C] = { .code = 0x00c, .name = "Pseudo Timer" },
29 [DIAG_STAT_X010] = { .code = 0x010, .name = "Release Pages" },
30 [DIAG_STAT_X014] = { .code = 0x014, .name = "Spool File Services" },
31 [DIAG_STAT_X044] = { .code = 0x044, .name = "Voluntary Timeslice End" },
32 [DIAG_STAT_X064] = { .code = 0x064, .name = "NSS Manipulation" },
33 [DIAG_STAT_X09C] = { .code = 0x09c, .name = "Relinquish Timeslice" },
34 [DIAG_STAT_X0DC] = { .code = 0x0dc, .name = "Appldata Control" },
35 [DIAG_STAT_X204] = { .code = 0x204, .name = "Logical-CPU Utilization" },
36 [DIAG_STAT_X210] = { .code = 0x210, .name = "Device Information" },
37 [DIAG_STAT_X224] = { .code = 0x224, .name = "EBCDIC-Name Table" },
38 [DIAG_STAT_X250] = { .code = 0x250, .name = "Block I/O" },
39 [DIAG_STAT_X258] = { .code = 0x258, .name = "Page-Reference Services" },
40 [DIAG_STAT_X288] = { .code = 0x288, .name = "Time Bomb" },
41 [DIAG_STAT_X2C4] = { .code = 0x2c4, .name = "FTP Services" },
42 [DIAG_STAT_X2FC] = { .code = 0x2fc, .name = "Guest Performance Data" },
43 [DIAG_STAT_X304] = { .code = 0x304, .name = "Partition-Resource Service" },
44 [DIAG_STAT_X308] = { .code = 0x308, .name = "List-Directed IPL" },
45 [DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" },
46};
47
48static int show_diag_stat(struct seq_file *m, void *v)
49{
50 struct diag_stat *stat;
51 unsigned long n = (unsigned long) v - 1;
52 int cpu, prec, tmp;
53
54 get_online_cpus();
55 if (n == 0) {
56 seq_puts(m, " ");
57
58 for_each_online_cpu(cpu) {
59 prec = 10;
60 for (tmp = 10; cpu >= tmp; tmp *= 10)
61 prec--;
62 seq_printf(m, "%*s%d", prec, "CPU", cpu);
63 }
64 seq_putc(m, '\n');
65 } else if (n <= NR_DIAG_STAT) {
66 seq_printf(m, "diag %03x:", diag_map[n-1].code);
67 for_each_online_cpu(cpu) {
68 stat = &per_cpu(diag_stat, cpu);
69 seq_printf(m, " %10u", stat->counter[n-1]);
70 }
71 seq_printf(m, " %s\n", diag_map[n-1].name);
72 }
73 put_online_cpus();
74 return 0;
75}
76
77static void *show_diag_stat_start(struct seq_file *m, loff_t *pos)
78{
79 return *pos <= nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL;
80}
81
82static void *show_diag_stat_next(struct seq_file *m, void *v, loff_t *pos)
83{
84 ++*pos;
85 return show_diag_stat_start(m, pos);
86}
87
88static void show_diag_stat_stop(struct seq_file *m, void *v)
89{
90}
91
92static const struct seq_operations show_diag_stat_sops = {
93 .start = show_diag_stat_start,
94 .next = show_diag_stat_next,
95 .stop = show_diag_stat_stop,
96 .show = show_diag_stat,
97};
98
99static int show_diag_stat_open(struct inode *inode, struct file *file)
100{
101 return seq_open(file, &show_diag_stat_sops);
102}
103
104static const struct file_operations show_diag_stat_fops = {
105 .open = show_diag_stat_open,
106 .read = seq_read,
107 .llseek = seq_lseek,
108 .release = seq_release,
109};
110
111
112static int __init show_diag_stat_init(void)
113{
114 debugfs_create_file("diag_stat", 0400, NULL, NULL,
115 &show_diag_stat_fops);
116 return 0;
117}
118
119device_initcall(show_diag_stat_init);
120
121void diag_stat_inc(enum diag_stat_enum nr)
122{
123 this_cpu_inc(diag_stat.counter[nr]);
124 trace_diagnose(diag_map[nr].code);
125}
126EXPORT_SYMBOL(diag_stat_inc);
127
128void diag_stat_inc_norecursion(enum diag_stat_enum nr)
129{
130 this_cpu_inc(diag_stat.counter[nr]);
131 trace_diagnose_norecursion(diag_map[nr].code);
132}
133EXPORT_SYMBOL(diag_stat_inc_norecursion);
10 134
11/* 135/*
12 * Diagnose 14: Input spool file manipulation 136 * Diagnose 14: Input spool file manipulation
13 */ 137 */
14int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode) 138static inline int __diag14(unsigned long rx, unsigned long ry1,
139 unsigned long subcode)
15{ 140{
16 register unsigned long _ry1 asm("2") = ry1; 141 register unsigned long _ry1 asm("2") = ry1;
17 register unsigned long _ry2 asm("3") = subcode; 142 register unsigned long _ry2 asm("3") = subcode;
@@ -29,6 +154,12 @@ int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
29 154
30 return rc; 155 return rc;
31} 156}
157
158int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
159{
160 diag_stat_inc(DIAG_STAT_X014);
161 return __diag14(rx, ry1, subcode);
162}
32EXPORT_SYMBOL(diag14); 163EXPORT_SYMBOL(diag14);
33 164
34/* 165/*
@@ -48,6 +179,7 @@ int diag210(struct diag210 *addr)
48 spin_lock_irqsave(&diag210_lock, flags); 179 spin_lock_irqsave(&diag210_lock, flags);
49 diag210_tmp = *addr; 180 diag210_tmp = *addr;
50 181
182 diag_stat_inc(DIAG_STAT_X210);
51 asm volatile( 183 asm volatile(
52 " lhi %0,-1\n" 184 " lhi %0,-1\n"
53 " sam31\n" 185 " sam31\n"
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 549a73a4b543..3c31609df959 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -17,6 +17,7 @@
17#include <linux/pfn.h> 17#include <linux/pfn.h>
18#include <linux/uaccess.h> 18#include <linux/uaccess.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <asm/diag.h>
20#include <asm/ebcdic.h> 21#include <asm/ebcdic.h>
21#include <asm/ipl.h> 22#include <asm/ipl.h>
22#include <asm/lowcore.h> 23#include <asm/lowcore.h>
@@ -286,6 +287,7 @@ static __init void detect_diag9c(void)
286 int rc; 287 int rc;
287 288
288 cpu_address = stap(); 289 cpu_address = stap();
290 diag_stat_inc(DIAG_STAT_X09C);
289 asm volatile( 291 asm volatile(
290 " diag %2,0,0x9c\n" 292 " diag %2,0,0x9c\n"
291 "0: la %0,0\n" 293 "0: la %0,0\n"
@@ -300,6 +302,7 @@ static __init void detect_diag44(void)
300{ 302{
301 int rc; 303 int rc;
302 304
305 diag_stat_inc(DIAG_STAT_X044);
303 asm volatile( 306 asm volatile(
304 " diag 0,0,0x44\n" 307 " diag 0,0,0x44\n"
305 "0: la %0,0\n" 308 "0: la %0,0\n"
@@ -326,9 +329,19 @@ static __init void detect_machine_facilities(void)
326 S390_lowcore.machine_flags |= MACHINE_FLAG_TE; 329 S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
327 if (test_facility(51)) 330 if (test_facility(51))
328 S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; 331 S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
329 if (test_facility(129)) 332 if (test_facility(129)) {
330 S390_lowcore.machine_flags |= MACHINE_FLAG_VX; 333 S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
334 __ctl_set_bit(0, 17);
335 }
336}
337
338static int __init disable_vector_extension(char *str)
339{
340 S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
341 __ctl_clear_bit(0, 17);
342 return 1;
331} 343}
344early_param("novx", disable_vector_extension);
332 345
333static int __init cad_setup(char *str) 346static int __init cad_setup(char *str)
334{ 347{
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 582fe44ab07c..857b6526d298 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -20,8 +20,9 @@
20#include <asm/page.h> 20#include <asm/page.h>
21#include <asm/sigp.h> 21#include <asm/sigp.h>
22#include <asm/irq.h> 22#include <asm/irq.h>
23#include <asm/fpu-internal.h>
24#include <asm/vx-insn.h> 23#include <asm/vx-insn.h>
24#include <asm/setup.h>
25#include <asm/nmi.h>
25 26
26__PT_R0 = __PT_GPRS 27__PT_R0 = __PT_GPRS
27__PT_R1 = __PT_GPRS + 8 28__PT_R1 = __PT_GPRS + 8
@@ -139,6 +140,28 @@ _PIF_WORK = (_PIF_PER_TRAP)
139#endif 140#endif
140 .endm 141 .endm
141 142
143 /*
144 * The TSTMSK macro generates a test-under-mask instruction by
145 * calculating the memory offset for the specified mask value.
146 * Mask value can be any constant. The macro shifts the mask
147 * value to calculate the memory offset for the test-under-mask
148 * instruction.
149 */
150 .macro TSTMSK addr, mask, size=8, bytepos=0
151 .if (\bytepos < \size) && (\mask >> 8)
152 .if (\mask & 0xff)
153 .error "Mask exceeds byte boundary"
154 .endif
155 TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
156 .exitm
157 .endif
158 .ifeq \mask
159 .error "Mask must not be zero"
160 .endif
161 off = \size - \bytepos - 1
162 tm off+\addr, \mask
163 .endm
164
142 .section .kprobes.text, "ax" 165 .section .kprobes.text, "ax"
143 166
144/* 167/*
@@ -164,8 +187,11 @@ ENTRY(__switch_to)
164 stg %r15,__LC_KERNEL_STACK # store end of kernel stack 187 stg %r15,__LC_KERNEL_STACK # store end of kernel stack
165 lg %r15,__THREAD_ksp(%r1) # load kernel stack of next 188 lg %r15,__THREAD_ksp(%r1) # load kernel stack of next
166 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 189 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
167 mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next 190 mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
168 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 191 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
192 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
193 bzr %r14
194 .insn s,0xb2800000,__LC_LPP # set program parameter
169 br %r14 195 br %r14
170 196
171.L__critical_start: 197.L__critical_start:
@@ -180,8 +206,8 @@ ENTRY(sie64a)
180 stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers 206 stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
181 stg %r2,__SF_EMPTY(%r15) # save control block pointer 207 stg %r2,__SF_EMPTY(%r15) # save control block pointer
182 stg %r3,__SF_EMPTY+8(%r15) # save guest register save area 208 stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
183 xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason 209 xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
184 tm __LC_CPU_FLAGS+7,_CIF_FPU # load guest fp/vx registers ? 210 TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ?
185 jno .Lsie_load_guest_gprs 211 jno .Lsie_load_guest_gprs
186 brasl %r14,load_fpu_regs # load guest fp/vx regs 212 brasl %r14,load_fpu_regs # load guest fp/vx regs
187.Lsie_load_guest_gprs: 213.Lsie_load_guest_gprs:
@@ -195,16 +221,9 @@ ENTRY(sie64a)
195 oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now 221 oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now
196 tm __SIE_PROG20+3(%r14),3 # last exit... 222 tm __SIE_PROG20+3(%r14),3 # last exit...
197 jnz .Lsie_skip 223 jnz .Lsie_skip
198 tm __LC_CPU_FLAGS+7,_CIF_FPU 224 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
199 jo .Lsie_skip # exit if fp/vx regs changed 225 jo .Lsie_skip # exit if fp/vx regs changed
200 tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP
201 jz .Lsie_enter
202 .insn s,0xb2800000,__LC_CURRENT_PID # set guest id to pid
203.Lsie_enter:
204 sie 0(%r14) 226 sie 0(%r14)
205 tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP
206 jz .Lsie_skip
207 .insn s,0xb2800000,__SF_EMPTY+16(%r15)# set host id
208.Lsie_skip: 227.Lsie_skip:
209 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE 228 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
210 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 229 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
@@ -221,11 +240,11 @@ sie_exit:
221 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area 240 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
222 stmg %r0,%r13,0(%r14) # save guest gprs 0-13 241 stmg %r0,%r13,0(%r14) # save guest gprs 0-13
223 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers 242 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
224 lg %r2,__SF_EMPTY+24(%r15) # return exit reason code 243 lg %r2,__SF_EMPTY+16(%r15) # return exit reason code
225 br %r14 244 br %r14
226.Lsie_fault: 245.Lsie_fault:
227 lghi %r14,-EFAULT 246 lghi %r14,-EFAULT
228 stg %r14,__SF_EMPTY+24(%r15) # set exit reason code 247 stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
229 j sie_exit 248 j sie_exit
230 249
231 EX_TABLE(.Lrewind_pad,.Lsie_fault) 250 EX_TABLE(.Lrewind_pad,.Lsie_fault)
@@ -271,7 +290,7 @@ ENTRY(system_call)
271 stg %r2,__PT_ORIG_GPR2(%r11) 290 stg %r2,__PT_ORIG_GPR2(%r11)
272 stg %r7,STACK_FRAME_OVERHEAD(%r15) 291 stg %r7,STACK_FRAME_OVERHEAD(%r15)
273 lgf %r9,0(%r8,%r10) # get system call add. 292 lgf %r9,0(%r8,%r10) # get system call add.
274 tm __TI_flags+7(%r12),_TIF_TRACE 293 TSTMSK __TI_flags(%r12),_TIF_TRACE
275 jnz .Lsysc_tracesys 294 jnz .Lsysc_tracesys
276 basr %r14,%r9 # call sys_xxxx 295 basr %r14,%r9 # call sys_xxxx
277 stg %r2,__PT_R2(%r11) # store return value 296 stg %r2,__PT_R2(%r11) # store return value
@@ -279,11 +298,11 @@ ENTRY(system_call)
279.Lsysc_return: 298.Lsysc_return:
280 LOCKDEP_SYS_EXIT 299 LOCKDEP_SYS_EXIT
281.Lsysc_tif: 300.Lsysc_tif:
282 tm __PT_FLAGS+7(%r11),_PIF_WORK 301 TSTMSK __PT_FLAGS(%r11),_PIF_WORK
283 jnz .Lsysc_work 302 jnz .Lsysc_work
284 tm __TI_flags+7(%r12),_TIF_WORK 303 TSTMSK __TI_flags(%r12),_TIF_WORK
285 jnz .Lsysc_work # check for work 304 jnz .Lsysc_work # check for work
286 tm __LC_CPU_FLAGS+7,_CIF_WORK 305 TSTMSK __LC_CPU_FLAGS,_CIF_WORK
287 jnz .Lsysc_work 306 jnz .Lsysc_work
288.Lsysc_restore: 307.Lsysc_restore:
289 lg %r14,__LC_VDSO_PER_CPU 308 lg %r14,__LC_VDSO_PER_CPU
@@ -299,23 +318,23 @@ ENTRY(system_call)
299# One of the work bits is on. Find out which one. 318# One of the work bits is on. Find out which one.
300# 319#
301.Lsysc_work: 320.Lsysc_work:
302 tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING 321 TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
303 jo .Lsysc_mcck_pending 322 jo .Lsysc_mcck_pending
304 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED 323 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
305 jo .Lsysc_reschedule 324 jo .Lsysc_reschedule
306#ifdef CONFIG_UPROBES 325#ifdef CONFIG_UPROBES
307 tm __TI_flags+7(%r12),_TIF_UPROBE 326 TSTMSK __TI_flags(%r12),_TIF_UPROBE
308 jo .Lsysc_uprobe_notify 327 jo .Lsysc_uprobe_notify
309#endif 328#endif
310 tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP 329 TSTMSK __PT_FLAGS(%r11),_PIF_PER_TRAP
311 jo .Lsysc_singlestep 330 jo .Lsysc_singlestep
312 tm __TI_flags+7(%r12),_TIF_SIGPENDING 331 TSTMSK __TI_flags(%r12),_TIF_SIGPENDING
313 jo .Lsysc_sigpending 332 jo .Lsysc_sigpending
314 tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME 333 TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME
315 jo .Lsysc_notify_resume 334 jo .Lsysc_notify_resume
316 tm __LC_CPU_FLAGS+7,_CIF_FPU 335 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
317 jo .Lsysc_vxrs 336 jo .Lsysc_vxrs
318 tm __LC_CPU_FLAGS+7,_CIF_ASCE 337 TSTMSK __LC_CPU_FLAGS,_CIF_ASCE
319 jo .Lsysc_uaccess 338 jo .Lsysc_uaccess
320 j .Lsysc_return # beware of critical section cleanup 339 j .Lsysc_return # beware of critical section cleanup
321 340
@@ -354,7 +373,7 @@ ENTRY(system_call)
354.Lsysc_sigpending: 373.Lsysc_sigpending:
355 lgr %r2,%r11 # pass pointer to pt_regs 374 lgr %r2,%r11 # pass pointer to pt_regs
356 brasl %r14,do_signal 375 brasl %r14,do_signal
357 tm __PT_FLAGS+7(%r11),_PIF_SYSCALL 376 TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL
358 jno .Lsysc_return 377 jno .Lsysc_return
359 lmg %r2,%r7,__PT_R2(%r11) # load svc arguments 378 lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
360 lg %r10,__TI_sysc_table(%r12) # address of system call table 379 lg %r10,__TI_sysc_table(%r12) # address of system call table
@@ -414,7 +433,7 @@ ENTRY(system_call)
414 basr %r14,%r9 # call sys_xxx 433 basr %r14,%r9 # call sys_xxx
415 stg %r2,__PT_R2(%r11) # store return value 434 stg %r2,__PT_R2(%r11) # store return value
416.Lsysc_tracenogo: 435.Lsysc_tracenogo:
417 tm __TI_flags+7(%r12),_TIF_TRACE 436 TSTMSK __TI_flags(%r12),_TIF_TRACE
418 jz .Lsysc_return 437 jz .Lsysc_return
419 lgr %r2,%r11 # pass pointer to pt_regs 438 lgr %r2,%r11 # pass pointer to pt_regs
420 larl %r14,.Lsysc_return 439 larl %r14,.Lsysc_return
@@ -544,6 +563,8 @@ ENTRY(io_int_handler)
544 stmg %r8,%r9,__PT_PSW(%r11) 563 stmg %r8,%r9,__PT_PSW(%r11)
545 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID 564 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
546 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 565 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
566 TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
567 jo .Lio_restore
547 TRACE_IRQS_OFF 568 TRACE_IRQS_OFF
548 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 569 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
549.Lio_loop: 570.Lio_loop:
@@ -554,7 +575,7 @@ ENTRY(io_int_handler)
554 lghi %r3,THIN_INTERRUPT 575 lghi %r3,THIN_INTERRUPT
555.Lio_call: 576.Lio_call:
556 brasl %r14,do_IRQ 577 brasl %r14,do_IRQ
557 tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR 578 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
558 jz .Lio_return 579 jz .Lio_return
559 tpi 0 580 tpi 0
560 jz .Lio_return 581 jz .Lio_return
@@ -564,9 +585,9 @@ ENTRY(io_int_handler)
564 LOCKDEP_SYS_EXIT 585 LOCKDEP_SYS_EXIT
565 TRACE_IRQS_ON 586 TRACE_IRQS_ON
566.Lio_tif: 587.Lio_tif:
567 tm __TI_flags+7(%r12),_TIF_WORK 588 TSTMSK __TI_flags(%r12),_TIF_WORK
568 jnz .Lio_work # there is work to do (signals etc.) 589 jnz .Lio_work # there is work to do (signals etc.)
569 tm __LC_CPU_FLAGS+7,_CIF_WORK 590 TSTMSK __LC_CPU_FLAGS,_CIF_WORK
570 jnz .Lio_work 591 jnz .Lio_work
571.Lio_restore: 592.Lio_restore:
572 lg %r14,__LC_VDSO_PER_CPU 593 lg %r14,__LC_VDSO_PER_CPU
@@ -594,7 +615,7 @@ ENTRY(io_int_handler)
594 # check for preemptive scheduling 615 # check for preemptive scheduling
595 icm %r0,15,__TI_precount(%r12) 616 icm %r0,15,__TI_precount(%r12)
596 jnz .Lio_restore # preemption is disabled 617 jnz .Lio_restore # preemption is disabled
597 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED 618 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
598 jno .Lio_restore 619 jno .Lio_restore
599 # switch to kernel stack 620 # switch to kernel stack
600 lg %r1,__PT_R15(%r11) 621 lg %r1,__PT_R15(%r11)
@@ -626,17 +647,17 @@ ENTRY(io_int_handler)
626# One of the work bits is on. Find out which one. 647# One of the work bits is on. Find out which one.
627# 648#
628.Lio_work_tif: 649.Lio_work_tif:
629 tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING 650 TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
630 jo .Lio_mcck_pending 651 jo .Lio_mcck_pending
631 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED 652 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
632 jo .Lio_reschedule 653 jo .Lio_reschedule
633 tm __TI_flags+7(%r12),_TIF_SIGPENDING 654 TSTMSK __TI_flags(%r12),_TIF_SIGPENDING
634 jo .Lio_sigpending 655 jo .Lio_sigpending
635 tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME 656 TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME
636 jo .Lio_notify_resume 657 jo .Lio_notify_resume
637 tm __LC_CPU_FLAGS+7,_CIF_FPU 658 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
638 jo .Lio_vxrs 659 jo .Lio_vxrs
639 tm __LC_CPU_FLAGS+7,_CIF_ASCE 660 TSTMSK __LC_CPU_FLAGS,_CIF_ASCE
640 jo .Lio_uaccess 661 jo .Lio_uaccess
641 j .Lio_return # beware of critical section cleanup 662 j .Lio_return # beware of critical section cleanup
642 663
@@ -719,6 +740,8 @@ ENTRY(ext_int_handler)
719 mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS 740 mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
720 mvc __PT_INT_PARM_LONG(8,%r11),0(%r1) 741 mvc __PT_INT_PARM_LONG(8,%r11),0(%r1)
721 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 742 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
743 TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
744 jo .Lio_restore
722 TRACE_IRQS_OFF 745 TRACE_IRQS_OFF
723 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 746 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
724 lgr %r2,%r11 # pass pointer to pt_regs 747 lgr %r2,%r11 # pass pointer to pt_regs
@@ -748,27 +771,22 @@ ENTRY(psw_idle)
748 br %r14 771 br %r14
749.Lpsw_idle_end: 772.Lpsw_idle_end:
750 773
751/* Store floating-point controls and floating-point or vector extension 774/*
752 * registers instead. A critical section cleanup assures that the registers 775 * Store floating-point controls and floating-point or vector register
753 * are stored even if interrupted for some other work. The register %r2 776 * depending whether the vector facility is available. A critical section
754 * designates a struct fpu to store register contents. If the specified 777 * cleanup assures that the registers are stored even if interrupted for
755 * structure does not contain a register save area, the register store is 778 * some other work. The CIF_FPU flag is set to trigger a lazy restore
756 * omitted (see also comments in arch_dup_task_struct()). 779 * of the register contents at return from io or a system call.
757 *
758 * The CIF_FPU flag is set in any case. The CIF_FPU triggers a lazy restore
759 * of the register contents at system call or io return.
760 */ 780 */
761ENTRY(save_fpu_regs) 781ENTRY(save_fpu_regs)
762 lg %r2,__LC_CURRENT 782 lg %r2,__LC_CURRENT
763 aghi %r2,__TASK_thread 783 aghi %r2,__TASK_thread
764 tm __LC_CPU_FLAGS+7,_CIF_FPU 784 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
765 bor %r14 785 bor %r14
766 stfpc __THREAD_FPU_fpc(%r2) 786 stfpc __THREAD_FPU_fpc(%r2)
767.Lsave_fpu_regs_fpc_end: 787.Lsave_fpu_regs_fpc_end:
768 lg %r3,__THREAD_FPU_regs(%r2) 788 lg %r3,__THREAD_FPU_regs(%r2)
769 ltgr %r3,%r3 789 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
770 jz .Lsave_fpu_regs_done # no save area -> set CIF_FPU
771 tm __THREAD_FPU_flags+3(%r2),FPU_USE_VX
772 jz .Lsave_fpu_regs_fp # no -> store FP regs 790 jz .Lsave_fpu_regs_fp # no -> store FP regs
773.Lsave_fpu_regs_vx_low: 791.Lsave_fpu_regs_vx_low:
774 VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3) 792 VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3)
@@ -797,41 +815,30 @@ ENTRY(save_fpu_regs)
797 br %r14 815 br %r14
798.Lsave_fpu_regs_end: 816.Lsave_fpu_regs_end:
799 817
800/* Load floating-point controls and floating-point or vector extension 818/*
801 * registers. A critical section cleanup assures that the register contents 819 * Load floating-point controls and floating-point or vector registers.
802 * are loaded even if interrupted for some other work. Depending on the saved 820 * A critical section cleanup assures that the register contents are
803 * FP/VX state, the vector-enablement control, CR0.46, is either set or cleared. 821 * loaded even if interrupted for some other work.
804 * 822 *
805 * There are special calling conventions to fit into sysc and io return work: 823 * There are special calling conventions to fit into sysc and io return work:
806 * %r15: <kernel stack> 824 * %r15: <kernel stack>
807 * The function requires: 825 * The function requires:
808 * %r4 and __SF_EMPTY+32(%r15) 826 * %r4
809 */ 827 */
810load_fpu_regs: 828load_fpu_regs:
811 lg %r4,__LC_CURRENT 829 lg %r4,__LC_CURRENT
812 aghi %r4,__TASK_thread 830 aghi %r4,__TASK_thread
813 tm __LC_CPU_FLAGS+7,_CIF_FPU 831 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
814 bnor %r14 832 bnor %r14
815 lfpc __THREAD_FPU_fpc(%r4) 833 lfpc __THREAD_FPU_fpc(%r4)
816 stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0 834 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
817 tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ?
818 lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area 835 lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
819 jz .Lload_fpu_regs_fp_ctl # -> no VX, load FP regs 836 jz .Lload_fpu_regs_fp # -> no VX, load FP regs
820.Lload_fpu_regs_vx_ctl:
821 tm __SF_EMPTY+32+5(%r15),2 # test VX control
822 jo .Lload_fpu_regs_vx
823 oi __SF_EMPTY+32+5(%r15),2 # set VX control
824 lctlg %c0,%c0,__SF_EMPTY+32(%r15)
825.Lload_fpu_regs_vx: 837.Lload_fpu_regs_vx:
826 VLM %v0,%v15,0,%r4 838 VLM %v0,%v15,0,%r4
827.Lload_fpu_regs_vx_high: 839.Lload_fpu_regs_vx_high:
828 VLM %v16,%v31,256,%r4 840 VLM %v16,%v31,256,%r4
829 j .Lload_fpu_regs_done 841 j .Lload_fpu_regs_done
830.Lload_fpu_regs_fp_ctl:
831 tm __SF_EMPTY+32+5(%r15),2 # test VX control
832 jz .Lload_fpu_regs_fp
833 ni __SF_EMPTY+32+5(%r15),253 # clear VX control
834 lctlg %c0,%c0,__SF_EMPTY+32(%r15)
835.Lload_fpu_regs_fp: 842.Lload_fpu_regs_fp:
836 ld 0,0(%r4) 843 ld 0,0(%r4)
837 ld 1,8(%r4) 844 ld 1,8(%r4)
@@ -854,16 +861,6 @@ load_fpu_regs:
854 br %r14 861 br %r14
855.Lload_fpu_regs_end: 862.Lload_fpu_regs_end:
856 863
857/* Test and set the vector enablement control in CR0.46 */
858ENTRY(__ctl_set_vx)
859 stctg %c0,%c0,__SF_EMPTY(%r15)
860 tm __SF_EMPTY+5(%r15),2
861 bor %r14
862 oi __SF_EMPTY+5(%r15),2
863 lctlg %c0,%c0,__SF_EMPTY(%r15)
864 br %r14
865.L__ctl_set_vx_end:
866
867.L__critical_end: 864.L__critical_end:
868 865
869/* 866/*
@@ -878,11 +875,11 @@ ENTRY(mcck_int_handler)
878 lg %r12,__LC_THREAD_INFO 875 lg %r12,__LC_THREAD_INFO
879 larl %r13,cleanup_critical 876 larl %r13,cleanup_critical
880 lmg %r8,%r9,__LC_MCK_OLD_PSW 877 lmg %r8,%r9,__LC_MCK_OLD_PSW
881 tm __LC_MCCK_CODE,0x80 # system damage? 878 TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
882 jo .Lmcck_panic # yes -> rest of mcck code invalid 879 jo .Lmcck_panic # yes -> rest of mcck code invalid
883 lghi %r14,__LC_CPU_TIMER_SAVE_AREA 880 lghi %r14,__LC_CPU_TIMER_SAVE_AREA
884 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 881 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
885 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? 882 TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
886 jo 3f 883 jo 3f
887 la %r14,__LC_SYNC_ENTER_TIMER 884 la %r14,__LC_SYNC_ENTER_TIMER
888 clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER 885 clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
@@ -896,7 +893,7 @@ ENTRY(mcck_int_handler)
896 la %r14,__LC_LAST_UPDATE_TIMER 893 la %r14,__LC_LAST_UPDATE_TIMER
8972: spt 0(%r14) 8942: spt 0(%r14)
898 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 895 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
8993: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? 8963: TSTMSK __LC_MCCK_CODE,(MCCK_CODE_PSW_MWP_VALID|MCCK_CODE_PSW_IA_VALID)
900 jno .Lmcck_panic # no -> skip cleanup critical 897 jno .Lmcck_panic # no -> skip cleanup critical
901 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER 898 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
902.Lmcck_skip: 899.Lmcck_skip:
@@ -916,7 +913,7 @@ ENTRY(mcck_int_handler)
916 la %r11,STACK_FRAME_OVERHEAD(%r1) 913 la %r11,STACK_FRAME_OVERHEAD(%r1)
917 lgr %r15,%r1 914 lgr %r15,%r1
918 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off 915 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
919 tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING 916 TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
920 jno .Lmcck_return 917 jno .Lmcck_return
921 TRACE_IRQS_OFF 918 TRACE_IRQS_OFF
922 brasl %r14,s390_handle_mcck 919 brasl %r14,s390_handle_mcck
@@ -941,7 +938,10 @@ ENTRY(mcck_int_handler)
941# PSW restart interrupt handler 938# PSW restart interrupt handler
942# 939#
943ENTRY(restart_int_handler) 940ENTRY(restart_int_handler)
944 stg %r15,__LC_SAVE_AREA_RESTART 941 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
942 jz 0f
943 .insn s,0xb2800000,__LC_LPP
9440: stg %r15,__LC_SAVE_AREA_RESTART
945 lg %r15,__LC_RESTART_STACK 945 lg %r15,__LC_RESTART_STACK
946 aghi %r15,-__PT_SIZE # create pt_regs on stack 946 aghi %r15,-__PT_SIZE # create pt_regs on stack
947 xc 0(__PT_SIZE,%r15),0(%r15) 947 xc 0(__PT_SIZE,%r15),0(%r15)
@@ -1019,10 +1019,6 @@ cleanup_critical:
1019 jl 0f 1019 jl 0f
1020 clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end 1020 clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
1021 jl .Lcleanup_load_fpu_regs 1021 jl .Lcleanup_load_fpu_regs
1022 clg %r9,BASED(.Lcleanup_table+112) # __ctl_set_vx
1023 jl 0f
1024 clg %r9,BASED(.Lcleanup_table+120) # .L__ctl_set_vx_end
1025 jl .Lcleanup___ctl_set_vx
10260: br %r14 10220: br %r14
1027 1023
1028 .align 8 1024 .align 8
@@ -1041,8 +1037,6 @@ cleanup_critical:
1041 .quad .Lsave_fpu_regs_end 1037 .quad .Lsave_fpu_regs_end
1042 .quad load_fpu_regs 1038 .quad load_fpu_regs
1043 .quad .Lload_fpu_regs_end 1039 .quad .Lload_fpu_regs_end
1044 .quad __ctl_set_vx
1045 .quad .L__ctl_set_vx_end
1046 1040
1047#if IS_ENABLED(CONFIG_KVM) 1041#if IS_ENABLED(CONFIG_KVM)
1048.Lcleanup_table_sie: 1042.Lcleanup_table_sie:
@@ -1051,10 +1045,7 @@ cleanup_critical:
1051 1045
1052.Lcleanup_sie: 1046.Lcleanup_sie:
1053 lg %r9,__SF_EMPTY(%r15) # get control block pointer 1047 lg %r9,__SF_EMPTY(%r15) # get control block pointer
1054 tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP 1048 ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
1055 jz 0f
1056 .insn s,0xb2800000,__SF_EMPTY+16(%r15)# set host id
10570: ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
1058 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 1049 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
1059 larl %r9,sie_exit # skip forward to sie_exit 1050 larl %r9,sie_exit # skip forward to sie_exit
1060 br %r14 1051 br %r14
@@ -1206,7 +1197,7 @@ cleanup_critical:
1206 .quad .Lpsw_idle_lpsw 1197 .quad .Lpsw_idle_lpsw
1207 1198
1208.Lcleanup_save_fpu_regs: 1199.Lcleanup_save_fpu_regs:
1209 tm __LC_CPU_FLAGS+7,_CIF_FPU 1200 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
1210 bor %r14 1201 bor %r14
1211 clg %r9,BASED(.Lcleanup_save_fpu_regs_done) 1202 clg %r9,BASED(.Lcleanup_save_fpu_regs_done)
1212 jhe 5f 1203 jhe 5f
@@ -1224,9 +1215,7 @@ cleanup_critical:
1224 stfpc __THREAD_FPU_fpc(%r2) 1215 stfpc __THREAD_FPU_fpc(%r2)
12251: # Load register save area and check if VX is active 12161: # Load register save area and check if VX is active
1226 lg %r3,__THREAD_FPU_regs(%r2) 1217 lg %r3,__THREAD_FPU_regs(%r2)
1227 ltgr %r3,%r3 1218 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
1228 jz 5f # no save area -> set CIF_FPU
1229 tm __THREAD_FPU_flags+3(%r2),FPU_USE_VX
1230 jz 4f # no VX -> store FP regs 1219 jz 4f # no VX -> store FP regs
12312: # Store vector registers (V0-V15) 12202: # Store vector registers (V0-V15)
1232 VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3) 1221 VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3)
@@ -1266,43 +1255,27 @@ cleanup_critical:
1266 .quad .Lsave_fpu_regs_done 1255 .quad .Lsave_fpu_regs_done
1267 1256
1268.Lcleanup_load_fpu_regs: 1257.Lcleanup_load_fpu_regs:
1269 tm __LC_CPU_FLAGS+7,_CIF_FPU 1258 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
1270 bnor %r14 1259 bnor %r14
1271 clg %r9,BASED(.Lcleanup_load_fpu_regs_done) 1260 clg %r9,BASED(.Lcleanup_load_fpu_regs_done)
1272 jhe 1f 1261 jhe 1f
1273 clg %r9,BASED(.Lcleanup_load_fpu_regs_fp) 1262 clg %r9,BASED(.Lcleanup_load_fpu_regs_fp)
1274 jhe 2f 1263 jhe 2f
1275 clg %r9,BASED(.Lcleanup_load_fpu_regs_fp_ctl)
1276 jhe 3f
1277 clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_high) 1264 clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_high)
1278 jhe 4f 1265 jhe 3f
1279 clg %r9,BASED(.Lcleanup_load_fpu_regs_vx) 1266 clg %r9,BASED(.Lcleanup_load_fpu_regs_vx)
1280 jhe 5f 1267 jhe 4f
1281 clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl)
1282 jhe 6f
1283 lg %r4,__LC_CURRENT 1268 lg %r4,__LC_CURRENT
1284 aghi %r4,__TASK_thread 1269 aghi %r4,__TASK_thread
1285 lfpc __THREAD_FPU_fpc(%r4) 1270 lfpc __THREAD_FPU_fpc(%r4)
1286 tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ? 1271 TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
1287 lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area 1272 lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
1288 jz 3f # -> no VX, load FP regs 1273 jz 2f # -> no VX, load FP regs
12896: # Set VX-enablement control 12744: # Load V0 ..V15 registers
1290 stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0
1291 tm __SF_EMPTY+32+5(%r15),2 # test VX control
1292 jo 5f
1293 oi __SF_EMPTY+32+5(%r15),2 # set VX control
1294 lctlg %c0,%c0,__SF_EMPTY+32(%r15)
12955: # Load V0 ..V15 registers
1296 VLM %v0,%v15,0,%r4 1275 VLM %v0,%v15,0,%r4
12974: # Load V16..V31 registers 12763: # Load V16..V31 registers
1298 VLM %v16,%v31,256,%r4 1277 VLM %v16,%v31,256,%r4
1299 j 1f 1278 j 1f
13003: # Clear VX-enablement control for FP
1301 stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0
1302 tm __SF_EMPTY+32+5(%r15),2 # test VX control
1303 jz 2f
1304 ni __SF_EMPTY+32+5(%r15),253 # clear VX control
1305 lctlg %c0,%c0,__SF_EMPTY+32(%r15)
13062: # Load floating-point registers 12792: # Load floating-point registers
1307 ld 0,0(%r4) 1280 ld 0,0(%r4)
1308 ld 1,8(%r4) 1281 ld 1,8(%r4)
@@ -1324,28 +1297,15 @@ cleanup_critical:
1324 ni __LC_CPU_FLAGS+7,255-_CIF_FPU 1297 ni __LC_CPU_FLAGS+7,255-_CIF_FPU
1325 lg %r9,48(%r11) # return from load_fpu_regs 1298 lg %r9,48(%r11) # return from load_fpu_regs
1326 br %r14 1299 br %r14
1327.Lcleanup_load_fpu_regs_vx_ctl:
1328 .quad .Lload_fpu_regs_vx_ctl
1329.Lcleanup_load_fpu_regs_vx: 1300.Lcleanup_load_fpu_regs_vx:
1330 .quad .Lload_fpu_regs_vx 1301 .quad .Lload_fpu_regs_vx
1331.Lcleanup_load_fpu_regs_vx_high: 1302.Lcleanup_load_fpu_regs_vx_high:
1332 .quad .Lload_fpu_regs_vx_high 1303 .quad .Lload_fpu_regs_vx_high
1333.Lcleanup_load_fpu_regs_fp_ctl:
1334 .quad .Lload_fpu_regs_fp_ctl
1335.Lcleanup_load_fpu_regs_fp: 1304.Lcleanup_load_fpu_regs_fp:
1336 .quad .Lload_fpu_regs_fp 1305 .quad .Lload_fpu_regs_fp
1337.Lcleanup_load_fpu_regs_done: 1306.Lcleanup_load_fpu_regs_done:
1338 .quad .Lload_fpu_regs_done 1307 .quad .Lload_fpu_regs_done
1339 1308
1340.Lcleanup___ctl_set_vx:
1341 stctg %c0,%c0,__SF_EMPTY(%r15)
1342 tm __SF_EMPTY+5(%r15),2
1343 bor %r14
1344 oi __SF_EMPTY+5(%r15),2
1345 lctlg %c0,%c0,__SF_EMPTY(%r15)
1346 lg %r9,48(%r11) # return from __ctl_set_vx
1347 br %r14
1348
1349/* 1309/*
1350 * Integer constants 1310 * Integer constants
1351 */ 1311 */
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 834df047d35f..b7019ab74070 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -16,13 +16,10 @@ void io_int_handler(void);
16void mcck_int_handler(void); 16void mcck_int_handler(void);
17void restart_int_handler(void); 17void restart_int_handler(void);
18void restart_call_handler(void); 18void restart_call_handler(void);
19void psw_idle(struct s390_idle_data *, unsigned long);
20 19
21asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); 20asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
22asmlinkage void do_syscall_trace_exit(struct pt_regs *regs); 21asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
23 22
24int alloc_vector_registers(struct task_struct *tsk);
25
26void do_protection_exception(struct pt_regs *regs); 23void do_protection_exception(struct pt_regs *regs);
27void do_dat_exception(struct pt_regs *regs); 24void do_dat_exception(struct pt_regs *regs);
28 25
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index d7c00507568a..58b719fa8067 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -16,7 +16,12 @@
16 16
17__HEAD 17__HEAD
18ENTRY(startup_continue) 18ENTRY(startup_continue)
19 larl %r1,sched_clock_base_cc 19 tm __LC_STFL_FAC_LIST+6,0x80 # LPP available ?
20 jz 0f
21 xc __LC_LPP+1(7,0),__LC_LPP+1 # clear lpp and current_pid
22 mvi __LC_LPP,0x80 # and set LPP_MAGIC
23 .insn s,0xb2800000,__LC_LPP # load program parameter
240: larl %r1,sched_clock_base_cc
20 mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK 25 mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK
21 larl %r13,.LPG1 # get base 26 larl %r13,.LPG1 # get base
22 lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers 27 lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 52fbef91d1d9..f6d8acd7e136 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -17,6 +17,7 @@
17#include <linux/gfp.h> 17#include <linux/gfp.h>
18#include <linux/crash_dump.h> 18#include <linux/crash_dump.h>
19#include <linux/debug_locks.h> 19#include <linux/debug_locks.h>
20#include <asm/diag.h>
20#include <asm/ipl.h> 21#include <asm/ipl.h>
21#include <asm/smp.h> 22#include <asm/smp.h>
22#include <asm/setup.h> 23#include <asm/setup.h>
@@ -165,7 +166,7 @@ static struct ipl_parameter_block *dump_block_ccw;
165 166
166static struct sclp_ipl_info sclp_ipl_info; 167static struct sclp_ipl_info sclp_ipl_info;
167 168
168int diag308(unsigned long subcode, void *addr) 169static inline int __diag308(unsigned long subcode, void *addr)
169{ 170{
170 register unsigned long _addr asm("0") = (unsigned long) addr; 171 register unsigned long _addr asm("0") = (unsigned long) addr;
171 register unsigned long _rc asm("1") = 0; 172 register unsigned long _rc asm("1") = 0;
@@ -178,6 +179,12 @@ int diag308(unsigned long subcode, void *addr)
178 : "d" (subcode) : "cc", "memory"); 179 : "d" (subcode) : "cc", "memory");
179 return _rc; 180 return _rc;
180} 181}
182
183int diag308(unsigned long subcode, void *addr)
184{
185 diag_stat_inc(DIAG_STAT_X308);
186 return __diag308(subcode, addr);
187}
181EXPORT_SYMBOL_GPL(diag308); 188EXPORT_SYMBOL_GPL(diag308);
182 189
183/* SYSFS */ 190/* SYSFS */
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index e9d9addfaa44..f41d5208aaf7 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -69,7 +69,6 @@ static const struct irq_class irqclass_sub_desc[] = {
69 {.irq = IRQEXT_IUC, .name = "IUC", .desc = "[EXT] IUCV"}, 69 {.irq = IRQEXT_IUC, .name = "IUC", .desc = "[EXT] IUCV"},
70 {.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"}, 70 {.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
71 {.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"}, 71 {.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
72 {.irq = IRQEXT_CMR, .name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
73 {.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"}, 72 {.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"},
74 {.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"}, 73 {.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
75 {.irq = IRQIO_QAI, .name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"}, 74 {.irq = IRQIO_QAI, .name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 0ae6f8e74840..07302ce37648 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -21,19 +21,20 @@
21#include <asm/nmi.h> 21#include <asm/nmi.h>
22#include <asm/crw.h> 22#include <asm/crw.h>
23#include <asm/switch_to.h> 23#include <asm/switch_to.h>
24#include <asm/fpu-internal.h>
25#include <asm/ctl_reg.h> 24#include <asm/ctl_reg.h>
26 25
27struct mcck_struct { 26struct mcck_struct {
28 int kill_task; 27 unsigned int kill_task : 1;
29 int channel_report; 28 unsigned int channel_report : 1;
30 int warning; 29 unsigned int warning : 1;
31 unsigned long long mcck_code; 30 unsigned int etr_queue : 1;
31 unsigned int stp_queue : 1;
32 unsigned long mcck_code;
32}; 33};
33 34
34static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck); 35static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);
35 36
36static void s390_handle_damage(char *msg) 37static void s390_handle_damage(void)
37{ 38{
38 smp_send_stop(); 39 smp_send_stop();
39 disabled_wait((unsigned long) __builtin_return_address(0)); 40 disabled_wait((unsigned long) __builtin_return_address(0));
@@ -81,10 +82,14 @@ void s390_handle_mcck(void)
81 if (xchg(&mchchk_wng_posted, 1) == 0) 82 if (xchg(&mchchk_wng_posted, 1) == 0)
82 kill_cad_pid(SIGPWR, 1); 83 kill_cad_pid(SIGPWR, 1);
83 } 84 }
85 if (mcck.etr_queue)
86 etr_queue_work();
87 if (mcck.stp_queue)
88 stp_queue_work();
84 if (mcck.kill_task) { 89 if (mcck.kill_task) {
85 local_irq_enable(); 90 local_irq_enable();
86 printk(KERN_EMERG "mcck: Terminating task because of machine " 91 printk(KERN_EMERG "mcck: Terminating task because of machine "
87 "malfunction (code 0x%016llx).\n", mcck.mcck_code); 92 "malfunction (code 0x%016lx).\n", mcck.mcck_code);
88 printk(KERN_EMERG "mcck: task: %s, pid: %d.\n", 93 printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
89 current->comm, current->pid); 94 current->comm, current->pid);
90 do_exit(SIGSEGV); 95 do_exit(SIGSEGV);
@@ -96,7 +101,7 @@ EXPORT_SYMBOL_GPL(s390_handle_mcck);
96 * returns 0 if all registers could be validated 101 * returns 0 if all registers could be validated
97 * returns 1 otherwise 102 * returns 1 otherwise
98 */ 103 */
99static int notrace s390_revalidate_registers(struct mci *mci) 104static int notrace s390_validate_registers(union mci mci)
100{ 105{
101 int kill_task; 106 int kill_task;
102 u64 zero; 107 u64 zero;
@@ -105,14 +110,14 @@ static int notrace s390_revalidate_registers(struct mci *mci)
105 kill_task = 0; 110 kill_task = 0;
106 zero = 0; 111 zero = 0;
107 112
108 if (!mci->gr) { 113 if (!mci.gr) {
109 /* 114 /*
110 * General purpose registers couldn't be restored and have 115 * General purpose registers couldn't be restored and have
111 * unknown contents. Process needs to be terminated. 116 * unknown contents. Process needs to be terminated.
112 */ 117 */
113 kill_task = 1; 118 kill_task = 1;
114 } 119 }
115 if (!mci->fp) { 120 if (!mci.fp) {
116 /* 121 /*
117 * Floating point registers can't be restored and 122 * Floating point registers can't be restored and
118 * therefore the process needs to be terminated. 123 * therefore the process needs to be terminated.
@@ -121,7 +126,7 @@ static int notrace s390_revalidate_registers(struct mci *mci)
121 } 126 }
122 fpt_save_area = &S390_lowcore.floating_pt_save_area; 127 fpt_save_area = &S390_lowcore.floating_pt_save_area;
123 fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; 128 fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
124 if (!mci->fc) { 129 if (!mci.fc) {
125 /* 130 /*
126 * Floating point control register can't be restored. 131 * Floating point control register can't be restored.
127 * Task will be terminated. 132 * Task will be terminated.
@@ -132,7 +137,7 @@ static int notrace s390_revalidate_registers(struct mci *mci)
132 asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area)); 137 asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
133 138
134 if (!MACHINE_HAS_VX) { 139 if (!MACHINE_HAS_VX) {
135 /* Revalidate floating point registers */ 140 /* Validate floating point registers */
136 asm volatile( 141 asm volatile(
137 " ld 0,0(%0)\n" 142 " ld 0,0(%0)\n"
138 " ld 1,8(%0)\n" 143 " ld 1,8(%0)\n"
@@ -152,10 +157,10 @@ static int notrace s390_revalidate_registers(struct mci *mci)
152 " ld 15,120(%0)\n" 157 " ld 15,120(%0)\n"
153 : : "a" (fpt_save_area)); 158 : : "a" (fpt_save_area));
154 } else { 159 } else {
155 /* Revalidate vector registers */ 160 /* Validate vector registers */
156 union ctlreg0 cr0; 161 union ctlreg0 cr0;
157 162
158 if (!mci->vr) { 163 if (!mci.vr) {
159 /* 164 /*
160 * Vector registers can't be restored and therefore 165 * Vector registers can't be restored and therefore
161 * the process needs to be terminated. 166 * the process needs to be terminated.
@@ -173,38 +178,38 @@ static int notrace s390_revalidate_registers(struct mci *mci)
173 &S390_lowcore.vector_save_area) : "1"); 178 &S390_lowcore.vector_save_area) : "1");
174 __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0); 179 __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
175 } 180 }
176 /* Revalidate access registers */ 181 /* Validate access registers */
177 asm volatile( 182 asm volatile(
178 " lam 0,15,0(%0)" 183 " lam 0,15,0(%0)"
179 : : "a" (&S390_lowcore.access_regs_save_area)); 184 : : "a" (&S390_lowcore.access_regs_save_area));
180 if (!mci->ar) { 185 if (!mci.ar) {
181 /* 186 /*
182 * Access registers have unknown contents. 187 * Access registers have unknown contents.
183 * Terminating task. 188 * Terminating task.
184 */ 189 */
185 kill_task = 1; 190 kill_task = 1;
186 } 191 }
187 /* Revalidate control registers */ 192 /* Validate control registers */
188 if (!mci->cr) { 193 if (!mci.cr) {
189 /* 194 /*
190 * Control registers have unknown contents. 195 * Control registers have unknown contents.
191 * Can't recover and therefore stopping machine. 196 * Can't recover and therefore stopping machine.
192 */ 197 */
193 s390_handle_damage("invalid control registers."); 198 s390_handle_damage();
194 } else { 199 } else {
195 asm volatile( 200 asm volatile(
196 " lctlg 0,15,0(%0)" 201 " lctlg 0,15,0(%0)"
197 : : "a" (&S390_lowcore.cregs_save_area)); 202 : : "a" (&S390_lowcore.cregs_save_area));
198 } 203 }
199 /* 204 /*
200 * We don't even try to revalidate the TOD register, since we simply 205 * We don't even try to validate the TOD register, since we simply
201 * can't write something sensible into that register. 206 * can't write something sensible into that register.
202 */ 207 */
203 /* 208 /*
204 * See if we can revalidate the TOD programmable register with its 209 * See if we can validate the TOD programmable register with its
205 * old contents (should be zero) otherwise set it to zero. 210 * old contents (should be zero) otherwise set it to zero.
206 */ 211 */
207 if (!mci->pr) 212 if (!mci.pr)
208 asm volatile( 213 asm volatile(
209 " sr 0,0\n" 214 " sr 0,0\n"
210 " sckpf" 215 " sckpf"
@@ -215,17 +220,17 @@ static int notrace s390_revalidate_registers(struct mci *mci)
215 " sckpf" 220 " sckpf"
216 : : "a" (&S390_lowcore.tod_progreg_save_area) 221 : : "a" (&S390_lowcore.tod_progreg_save_area)
217 : "0", "cc"); 222 : "0", "cc");
218 /* Revalidate clock comparator register */ 223 /* Validate clock comparator register */
219 set_clock_comparator(S390_lowcore.clock_comparator); 224 set_clock_comparator(S390_lowcore.clock_comparator);
220 /* Check if old PSW is valid */ 225 /* Check if old PSW is valid */
221 if (!mci->wp) 226 if (!mci.wp)
222 /* 227 /*
223 * Can't tell if we come from user or kernel mode 228 * Can't tell if we come from user or kernel mode
224 * -> stopping machine. 229 * -> stopping machine.
225 */ 230 */
226 s390_handle_damage("old psw invalid."); 231 s390_handle_damage();
227 232
228 if (!mci->ms || !mci->pm || !mci->ia) 233 if (!mci.ms || !mci.pm || !mci.ia)
229 kill_task = 1; 234 kill_task = 1;
230 235
231 return kill_task; 236 return kill_task;
@@ -249,21 +254,21 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
249 static unsigned long long last_ipd; 254 static unsigned long long last_ipd;
250 struct mcck_struct *mcck; 255 struct mcck_struct *mcck;
251 unsigned long long tmp; 256 unsigned long long tmp;
252 struct mci *mci; 257 union mci mci;
253 int umode; 258 int umode;
254 259
255 nmi_enter(); 260 nmi_enter();
256 inc_irq_stat(NMI_NMI); 261 inc_irq_stat(NMI_NMI);
257 mci = (struct mci *) &S390_lowcore.mcck_interruption_code; 262 mci.val = S390_lowcore.mcck_interruption_code;
258 mcck = this_cpu_ptr(&cpu_mcck); 263 mcck = this_cpu_ptr(&cpu_mcck);
259 umode = user_mode(regs); 264 umode = user_mode(regs);
260 265
261 if (mci->sd) { 266 if (mci.sd) {
262 /* System damage -> stopping machine */ 267 /* System damage -> stopping machine */
263 s390_handle_damage("received system damage machine check."); 268 s390_handle_damage();
264 } 269 }
265 if (mci->pd) { 270 if (mci.pd) {
266 if (mci->b) { 271 if (mci.b) {
267 /* Processing backup -> verify if we can survive this */ 272 /* Processing backup -> verify if we can survive this */
268 u64 z_mcic, o_mcic, t_mcic; 273 u64 z_mcic, o_mcic, t_mcic;
269 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29); 274 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
@@ -271,12 +276,11 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
271 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | 276 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
272 1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 | 277 1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 |
273 1ULL<<16); 278 1ULL<<16);
274 t_mcic = *(u64 *)mci; 279 t_mcic = mci.val;
275 280
276 if (((t_mcic & z_mcic) != 0) || 281 if (((t_mcic & z_mcic) != 0) ||
277 ((t_mcic & o_mcic) != o_mcic)) { 282 ((t_mcic & o_mcic) != o_mcic)) {
278 s390_handle_damage("processing backup machine " 283 s390_handle_damage();
279 "check with damage.");
280 } 284 }
281 285
282 /* 286 /*
@@ -291,64 +295,62 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
291 ipd_count = 1; 295 ipd_count = 1;
292 last_ipd = tmp; 296 last_ipd = tmp;
293 if (ipd_count == MAX_IPD_COUNT) 297 if (ipd_count == MAX_IPD_COUNT)
294 s390_handle_damage("too many ipd retries."); 298 s390_handle_damage();
295 spin_unlock(&ipd_lock); 299 spin_unlock(&ipd_lock);
296 } else { 300 } else {
297 /* Processing damage -> stopping machine */ 301 /* Processing damage -> stopping machine */
298 s390_handle_damage("received instruction processing " 302 s390_handle_damage();
299 "damage machine check.");
300 } 303 }
301 } 304 }
302 if (s390_revalidate_registers(mci)) { 305 if (s390_validate_registers(mci)) {
303 if (umode) { 306 if (umode) {
304 /* 307 /*
305 * Couldn't restore all register contents while in 308 * Couldn't restore all register contents while in
306 * user mode -> mark task for termination. 309 * user mode -> mark task for termination.
307 */ 310 */
308 mcck->kill_task = 1; 311 mcck->kill_task = 1;
309 mcck->mcck_code = *(unsigned long long *) mci; 312 mcck->mcck_code = mci.val;
310 set_cpu_flag(CIF_MCCK_PENDING); 313 set_cpu_flag(CIF_MCCK_PENDING);
311 } else { 314 } else {
312 /* 315 /*
313 * Couldn't restore all register contents while in 316 * Couldn't restore all register contents while in
314 * kernel mode -> stopping machine. 317 * kernel mode -> stopping machine.
315 */ 318 */
316 s390_handle_damage("unable to revalidate registers."); 319 s390_handle_damage();
317 } 320 }
318 } 321 }
319 if (mci->cd) { 322 if (mci.cd) {
320 /* Timing facility damage */ 323 /* Timing facility damage */
321 s390_handle_damage("TOD clock damaged"); 324 s390_handle_damage();
322 } 325 }
323 if (mci->ed && mci->ec) { 326 if (mci.ed && mci.ec) {
324 /* External damage */ 327 /* External damage */
325 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC)) 328 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC))
326 etr_sync_check(); 329 mcck->etr_queue |= etr_sync_check();
327 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH)) 330 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH))
328 etr_switch_to_local(); 331 mcck->etr_queue |= etr_switch_to_local();
329 if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC)) 332 if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC))
330 stp_sync_check(); 333 mcck->stp_queue |= stp_sync_check();
331 if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND)) 334 if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
332 stp_island_check(); 335 mcck->stp_queue |= stp_island_check();
336 if (mcck->etr_queue || mcck->stp_queue)
337 set_cpu_flag(CIF_MCCK_PENDING);
333 } 338 }
334 if (mci->se) 339 if (mci.se)
335 /* Storage error uncorrected */ 340 /* Storage error uncorrected */
336 s390_handle_damage("received storage error uncorrected " 341 s390_handle_damage();
337 "machine check."); 342 if (mci.ke)
338 if (mci->ke)
339 /* Storage key-error uncorrected */ 343 /* Storage key-error uncorrected */
340 s390_handle_damage("received storage key-error uncorrected " 344 s390_handle_damage();
341 "machine check."); 345 if (mci.ds && mci.fa)
342 if (mci->ds && mci->fa)
343 /* Storage degradation */ 346 /* Storage degradation */
344 s390_handle_damage("received storage degradation machine " 347 s390_handle_damage();
345 "check."); 348 if (mci.cp) {
346 if (mci->cp) {
347 /* Channel report word pending */ 349 /* Channel report word pending */
348 mcck->channel_report = 1; 350 mcck->channel_report = 1;
349 set_cpu_flag(CIF_MCCK_PENDING); 351 set_cpu_flag(CIF_MCCK_PENDING);
350 } 352 }
351 if (mci->w) { 353 if (mci.w) {
352 /* Warning pending */ 354 /* Warning pending */
353 mcck->warning = 1; 355 mcck->warning = 1;
354 set_cpu_flag(CIF_MCCK_PENDING); 356 set_cpu_flag(CIF_MCCK_PENDING);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index b973972f6ba5..3d8da1e742c2 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1019,11 +1019,13 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
1019 break; 1019 break;
1020 } 1020 }
1021 1021
1022 /* The host-program-parameter (hpp) contains the pid of 1022 /*
1023 * the CPU thread as set by sie64a() in entry.S. 1023 * A non-zero guest program parameter indicates a guest
1024 * If non-zero assume a guest sample. 1024 * sample.
1025 * Note that some early samples might be misaccounted to
1026 * the host.
1025 */ 1027 */
1026 if (sfr->basic.hpp) 1028 if (sfr->basic.gpp)
1027 sde_regs->in_guest = 1; 1029 sde_regs->in_guest = 1;
1028 1030
1029 overflow = 0; 1031 overflow = 0;
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index f2dac9f0799d..688a3aad9c79 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -23,6 +23,7 @@
23#include <linux/kprobes.h> 23#include <linux/kprobes.h>
24#include <linux/random.h> 24#include <linux/random.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/init_task.h>
26#include <asm/io.h> 27#include <asm/io.h>
27#include <asm/processor.h> 28#include <asm/processor.h>
28#include <asm/vtimer.h> 29#include <asm/vtimer.h>
@@ -36,6 +37,9 @@
36 37
37asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); 38asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
38 39
40/* FPU save area for the init task */
41__vector128 init_task_fpu_regs[__NUM_VXRS] __init_task_data;
42
39/* 43/*
40 * Return saved PC of a blocked thread. used in kernel/sched. 44 * Return saved PC of a blocked thread. used in kernel/sched.
41 * resume in entry.S does not create a new stack frame, it 45 * resume in entry.S does not create a new stack frame, it
@@ -87,31 +91,29 @@ void arch_release_task_struct(struct task_struct *tsk)
87 91
88int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) 92int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
89{ 93{
94 size_t fpu_regs_size;
95
90 *dst = *src; 96 *dst = *src;
91 97
92 /* Set up a new floating-point register save area */ 98 /*
93 dst->thread.fpu.fpc = 0; 99 * If the vector extension is available, it is enabled for all tasks,
94 dst->thread.fpu.flags = 0; /* Always start with VX disabled */ 100 * and, thus, the FPU register save area must be allocated accordingly.
95 dst->thread.fpu.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS, 101 */
96 GFP_KERNEL|__GFP_REPEAT); 102 fpu_regs_size = MACHINE_HAS_VX ? sizeof(__vector128) * __NUM_VXRS
97 if (!dst->thread.fpu.fprs) 103 : sizeof(freg_t) * __NUM_FPRS;
104 dst->thread.fpu.regs = kzalloc(fpu_regs_size, GFP_KERNEL|__GFP_REPEAT);
105 if (!dst->thread.fpu.regs)
98 return -ENOMEM; 106 return -ENOMEM;
99 107
100 /* 108 /*
101 * Save the floating-point or vector register state of the current 109 * Save the floating-point or vector register state of the current
102 * task. The state is not saved for early kernel threads, for example, 110 * task and set the CIF_FPU flag to lazy restore the FPU register
103 * the init_task, which do not have an allocated save area. 111 * state when returning to user space.
104 * The CIF_FPU flag is set in any case to lazy clear or restore a saved
105 * state when switching to a different task or returning to user space.
106 */ 112 */
107 save_fpu_regs(); 113 save_fpu_regs();
108 dst->thread.fpu.fpc = current->thread.fpu.fpc; 114 dst->thread.fpu.fpc = current->thread.fpu.fpc;
109 if (is_vx_task(current)) 115 memcpy(dst->thread.fpu.regs, current->thread.fpu.regs, fpu_regs_size);
110 convert_vx_to_fp(dst->thread.fpu.fprs, 116
111 current->thread.fpu.vxrs);
112 else
113 memcpy(dst->thread.fpu.fprs, current->thread.fpu.fprs,
114 sizeof(freg_t) * __NUM_FPRS);
115 return 0; 117 return 0;
116} 118}
117 119
@@ -169,7 +171,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
169 171
170 /* Don't copy runtime instrumentation info */ 172 /* Don't copy runtime instrumentation info */
171 p->thread.ri_cb = NULL; 173 p->thread.ri_cb = NULL;
172 p->thread.ri_signum = 0;
173 frame->childregs.psw.mask &= ~PSW_MASK_RI; 174 frame->childregs.psw.mask &= ~PSW_MASK_RI;
174 175
175 /* Set a new TLS ? */ 176 /* Set a new TLS ? */
@@ -199,7 +200,7 @@ int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
199 save_fpu_regs(); 200 save_fpu_regs();
200 fpregs->fpc = current->thread.fpu.fpc; 201 fpregs->fpc = current->thread.fpu.fpc;
201 fpregs->pad = 0; 202 fpregs->pad = 0;
202 if (is_vx_task(current)) 203 if (MACHINE_HAS_VX)
203 convert_vx_to_fp((freg_t *)&fpregs->fprs, 204 convert_vx_to_fp((freg_t *)&fpregs->fprs,
204 current->thread.fpu.vxrs); 205 current->thread.fpu.vxrs);
205 else 206 else
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index e6e077ae3990..7ce00e7a709a 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -11,6 +11,7 @@
11#include <linux/seq_file.h> 11#include <linux/seq_file.h>
12#include <linux/delay.h> 12#include <linux/delay.h>
13#include <linux/cpu.h> 13#include <linux/cpu.h>
14#include <asm/diag.h>
14#include <asm/elf.h> 15#include <asm/elf.h>
15#include <asm/lowcore.h> 16#include <asm/lowcore.h>
16#include <asm/param.h> 17#include <asm/param.h>
@@ -20,8 +21,10 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id);
20 21
21void notrace cpu_relax(void) 22void notrace cpu_relax(void)
22{ 23{
23 if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) 24 if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) {
25 diag_stat_inc(DIAG_STAT_X044);
24 asm volatile("diag 0,0,0x44"); 26 asm volatile("diag 0,0,0x44");
27 }
25 barrier(); 28 barrier();
26} 29}
27EXPORT_SYMBOL(cpu_relax); 30EXPORT_SYMBOL(cpu_relax);
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 8b1c8e33f184..01c37b36caf9 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -239,12 +239,12 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
239 * or the child->thread.fpu.vxrs array 239 * or the child->thread.fpu.vxrs array
240 */ 240 */
241 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; 241 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
242 if (is_vx_task(child)) 242 if (MACHINE_HAS_VX)
243 tmp = *(addr_t *) 243 tmp = *(addr_t *)
244 ((addr_t) child->thread.fpu.vxrs + 2*offset); 244 ((addr_t) child->thread.fpu.vxrs + 2*offset);
245 else 245 else
246 tmp = *(addr_t *) 246 tmp = *(addr_t *)
247 ((addr_t) &child->thread.fpu.fprs + offset); 247 ((addr_t) child->thread.fpu.fprs + offset);
248 248
249 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { 249 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
250 /* 250 /*
@@ -383,12 +383,12 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
383 * or the child->thread.fpu.vxrs array 383 * or the child->thread.fpu.vxrs array
384 */ 384 */
385 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; 385 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
386 if (is_vx_task(child)) 386 if (MACHINE_HAS_VX)
387 *(addr_t *)((addr_t) 387 *(addr_t *)((addr_t)
388 child->thread.fpu.vxrs + 2*offset) = data; 388 child->thread.fpu.vxrs + 2*offset) = data;
389 else 389 else
390 *(addr_t *)((addr_t) 390 *(addr_t *)((addr_t)
391 &child->thread.fpu.fprs + offset) = data; 391 child->thread.fpu.fprs + offset) = data;
392 392
393 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { 393 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
394 /* 394 /*
@@ -617,12 +617,12 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
617 * or the child->thread.fpu.vxrs array 617 * or the child->thread.fpu.vxrs array
618 */ 618 */
619 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; 619 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
620 if (is_vx_task(child)) 620 if (MACHINE_HAS_VX)
621 tmp = *(__u32 *) 621 tmp = *(__u32 *)
622 ((addr_t) child->thread.fpu.vxrs + 2*offset); 622 ((addr_t) child->thread.fpu.vxrs + 2*offset);
623 else 623 else
624 tmp = *(__u32 *) 624 tmp = *(__u32 *)
625 ((addr_t) &child->thread.fpu.fprs + offset); 625 ((addr_t) child->thread.fpu.fprs + offset);
626 626
627 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { 627 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
628 /* 628 /*
@@ -742,12 +742,12 @@ static int __poke_user_compat(struct task_struct *child,
742 * or the child->thread.fpu.vxrs array 742 * or the child->thread.fpu.vxrs array
743 */ 743 */
744 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; 744 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
745 if (is_vx_task(child)) 745 if (MACHINE_HAS_VX)
746 *(__u32 *)((addr_t) 746 *(__u32 *)((addr_t)
747 child->thread.fpu.vxrs + 2*offset) = tmp; 747 child->thread.fpu.vxrs + 2*offset) = tmp;
748 else 748 else
749 *(__u32 *)((addr_t) 749 *(__u32 *)((addr_t)
750 &child->thread.fpu.fprs + offset) = tmp; 750 child->thread.fpu.fprs + offset) = tmp;
751 751
752 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { 752 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
753 /* 753 /*
@@ -981,7 +981,7 @@ static int s390_fpregs_set(struct task_struct *target,
981 if (rc) 981 if (rc)
982 return rc; 982 return rc;
983 983
984 if (is_vx_task(target)) 984 if (MACHINE_HAS_VX)
985 convert_fp_to_vx(target->thread.fpu.vxrs, fprs); 985 convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
986 else 986 else
987 memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs)); 987 memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));
@@ -1047,13 +1047,10 @@ static int s390_vxrs_low_get(struct task_struct *target,
1047 1047
1048 if (!MACHINE_HAS_VX) 1048 if (!MACHINE_HAS_VX)
1049 return -ENODEV; 1049 return -ENODEV;
1050 if (is_vx_task(target)) { 1050 if (target == current)
1051 if (target == current) 1051 save_fpu_regs();
1052 save_fpu_regs(); 1052 for (i = 0; i < __NUM_VXRS_LOW; i++)
1053 for (i = 0; i < __NUM_VXRS_LOW; i++) 1053 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
1054 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
1055 } else
1056 memset(vxrs, 0, sizeof(vxrs));
1057 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); 1054 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1058} 1055}
1059 1056
@@ -1067,11 +1064,7 @@ static int s390_vxrs_low_set(struct task_struct *target,
1067 1064
1068 if (!MACHINE_HAS_VX) 1065 if (!MACHINE_HAS_VX)
1069 return -ENODEV; 1066 return -ENODEV;
1070 if (!is_vx_task(target)) { 1067 if (target == current)
1071 rc = alloc_vector_registers(target);
1072 if (rc)
1073 return rc;
1074 } else if (target == current)
1075 save_fpu_regs(); 1068 save_fpu_regs();
1076 1069
1077 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); 1070 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
@@ -1091,13 +1084,10 @@ static int s390_vxrs_high_get(struct task_struct *target,
1091 1084
1092 if (!MACHINE_HAS_VX) 1085 if (!MACHINE_HAS_VX)
1093 return -ENODEV; 1086 return -ENODEV;
1094 if (is_vx_task(target)) { 1087 if (target == current)
1095 if (target == current) 1088 save_fpu_regs();
1096 save_fpu_regs(); 1089 memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(vxrs));
1097 memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, 1090
1098 sizeof(vxrs));
1099 } else
1100 memset(vxrs, 0, sizeof(vxrs));
1101 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); 1091 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1102} 1092}
1103 1093
@@ -1110,11 +1100,7 @@ static int s390_vxrs_high_set(struct task_struct *target,
1110 1100
1111 if (!MACHINE_HAS_VX) 1101 if (!MACHINE_HAS_VX)
1112 return -ENODEV; 1102 return -ENODEV;
1113 if (!is_vx_task(target)) { 1103 if (target == current)
1114 rc = alloc_vector_registers(target);
1115 if (rc)
1116 return rc;
1117 } else if (target == current)
1118 save_fpu_regs(); 1104 save_fpu_regs();
1119 1105
1120 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1106 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
index 26b4ae96fdd7..fffa0e5462af 100644
--- a/arch/s390/kernel/runtime_instr.c
+++ b/arch/s390/kernel/runtime_instr.c
@@ -18,11 +18,6 @@
18/* empty control block to disable RI by loading it */ 18/* empty control block to disable RI by loading it */
19struct runtime_instr_cb runtime_instr_empty_cb; 19struct runtime_instr_cb runtime_instr_empty_cb;
20 20
21static int runtime_instr_avail(void)
22{
23 return test_facility(64);
24}
25
26static void disable_runtime_instr(void) 21static void disable_runtime_instr(void)
27{ 22{
28 struct pt_regs *regs = task_pt_regs(current); 23 struct pt_regs *regs = task_pt_regs(current);
@@ -40,7 +35,6 @@ static void disable_runtime_instr(void)
40static void init_runtime_instr_cb(struct runtime_instr_cb *cb) 35static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
41{ 36{
42 cb->buf_limit = 0xfff; 37 cb->buf_limit = 0xfff;
43 cb->int_requested = 1;
44 cb->pstate = 1; 38 cb->pstate = 1;
45 cb->pstate_set_buf = 1; 39 cb->pstate_set_buf = 1;
46 cb->pstate_sample = 1; 40 cb->pstate_sample = 1;
@@ -57,46 +51,14 @@ void exit_thread_runtime_instr(void)
57 return; 51 return;
58 disable_runtime_instr(); 52 disable_runtime_instr();
59 kfree(task->thread.ri_cb); 53 kfree(task->thread.ri_cb);
60 task->thread.ri_signum = 0;
61 task->thread.ri_cb = NULL; 54 task->thread.ri_cb = NULL;
62} 55}
63 56
64static void runtime_instr_int_handler(struct ext_code ext_code, 57SYSCALL_DEFINE1(s390_runtime_instr, int, command)
65 unsigned int param32, unsigned long param64)
66{
67 struct siginfo info;
68
69 if (!(param32 & CPU_MF_INT_RI_MASK))
70 return;
71
72 inc_irq_stat(IRQEXT_CMR);
73
74 if (!current->thread.ri_cb)
75 return;
76 if (current->thread.ri_signum < SIGRTMIN ||
77 current->thread.ri_signum > SIGRTMAX) {
78 WARN_ON_ONCE(1);
79 return;
80 }
81
82 memset(&info, 0, sizeof(info));
83 info.si_signo = current->thread.ri_signum;
84 info.si_code = SI_QUEUE;
85 if (param32 & CPU_MF_INT_RI_BUF_FULL)
86 info.si_int = ENOBUFS;
87 else if (param32 & CPU_MF_INT_RI_HALTED)
88 info.si_int = ECANCELED;
89 else
90 return; /* unknown reason */
91
92 send_sig_info(current->thread.ri_signum, &info, current);
93}
94
95SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum)
96{ 58{
97 struct runtime_instr_cb *cb; 59 struct runtime_instr_cb *cb;
98 60
99 if (!runtime_instr_avail()) 61 if (!test_facility(64))
100 return -EOPNOTSUPP; 62 return -EOPNOTSUPP;
101 63
102 if (command == S390_RUNTIME_INSTR_STOP) { 64 if (command == S390_RUNTIME_INSTR_STOP) {
@@ -106,8 +68,7 @@ SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum)
106 return 0; 68 return 0;
107 } 69 }
108 70
109 if (command != S390_RUNTIME_INSTR_START || 71 if (command != S390_RUNTIME_INSTR_START)
110 (signum < SIGRTMIN || signum > SIGRTMAX))
111 return -EINVAL; 72 return -EINVAL;
112 73
113 if (!current->thread.ri_cb) { 74 if (!current->thread.ri_cb) {
@@ -120,7 +81,6 @@ SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum)
120 } 81 }
121 82
122 init_runtime_instr_cb(cb); 83 init_runtime_instr_cb(cb);
123 current->thread.ri_signum = signum;
124 84
125 /* now load the control block to make it available */ 85 /* now load the control block to make it available */
126 preempt_disable(); 86 preempt_disable();
@@ -129,21 +89,3 @@ SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum)
129 preempt_enable(); 89 preempt_enable();
130 return 0; 90 return 0;
131} 91}
132
133static int __init runtime_instr_init(void)
134{
135 int rc;
136
137 if (!runtime_instr_avail())
138 return 0;
139
140 irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
141 rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
142 runtime_instr_int_handler);
143 if (rc)
144 irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
145 else
146 pr_info("Runtime instrumentation facility initialized\n");
147 return rc;
148}
149device_initcall(runtime_instr_init);
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index 5090d3dad10b..e67453b73c3c 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -1,6 +1,6 @@
1#include <linux/module.h> 1#include <linux/module.h>
2#include <linux/kvm_host.h> 2#include <linux/kvm_host.h>
3#include <asm/fpu-internal.h> 3#include <asm/fpu/api.h>
4#include <asm/ftrace.h> 4#include <asm/ftrace.h>
5 5
6#ifdef CONFIG_FUNCTION_TRACER 6#ifdef CONFIG_FUNCTION_TRACER
@@ -10,7 +10,6 @@ EXPORT_SYMBOL(_mcount);
10EXPORT_SYMBOL(sie64a); 10EXPORT_SYMBOL(sie64a);
11EXPORT_SYMBOL(sie_exit); 11EXPORT_SYMBOL(sie_exit);
12EXPORT_SYMBOL(save_fpu_regs); 12EXPORT_SYMBOL(save_fpu_regs);
13EXPORT_SYMBOL(__ctl_set_vx);
14#endif 13#endif
15EXPORT_SYMBOL(memcpy); 14EXPORT_SYMBOL(memcpy);
16EXPORT_SYMBOL(memset); 15EXPORT_SYMBOL(memset);
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 9549af102d75..028cc46cb82a 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -179,7 +179,7 @@ static int save_sigregs_ext(struct pt_regs *regs,
179 int i; 179 int i;
180 180
181 /* Save vector registers to signal stack */ 181 /* Save vector registers to signal stack */
182 if (is_vx_task(current)) { 182 if (MACHINE_HAS_VX) {
183 for (i = 0; i < __NUM_VXRS_LOW; i++) 183 for (i = 0; i < __NUM_VXRS_LOW; i++)
184 vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1); 184 vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1);
185 if (__copy_to_user(&sregs_ext->vxrs_low, vxrs, 185 if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
@@ -199,7 +199,7 @@ static int restore_sigregs_ext(struct pt_regs *regs,
199 int i; 199 int i;
200 200
201 /* Restore vector registers from signal stack */ 201 /* Restore vector registers from signal stack */
202 if (is_vx_task(current)) { 202 if (MACHINE_HAS_VX) {
203 if (__copy_from_user(vxrs, &sregs_ext->vxrs_low, 203 if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
204 sizeof(sregs_ext->vxrs_low)) || 204 sizeof(sregs_ext->vxrs_low)) ||
205 __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW, 205 __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
@@ -381,8 +381,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
381 uc_flags = 0; 381 uc_flags = 0;
382 if (MACHINE_HAS_VX) { 382 if (MACHINE_HAS_VX) {
383 frame_size += sizeof(_sigregs_ext); 383 frame_size += sizeof(_sigregs_ext);
384 if (is_vx_task(current)) 384 uc_flags |= UC_VXRS;
385 uc_flags |= UC_VXRS;
386 } 385 }
387 frame = get_sigframe(&ksig->ka, regs, frame_size); 386 frame = get_sigframe(&ksig->ka, regs, frame_size);
388 if (frame == (void __user *) -1UL) 387 if (frame == (void __user *) -1UL)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index c6355e6f3fcc..9062df575afe 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -33,6 +33,7 @@
33#include <linux/crash_dump.h> 33#include <linux/crash_dump.h>
34#include <linux/memblock.h> 34#include <linux/memblock.h>
35#include <asm/asm-offsets.h> 35#include <asm/asm-offsets.h>
36#include <asm/diag.h>
36#include <asm/switch_to.h> 37#include <asm/switch_to.h>
37#include <asm/facility.h> 38#include <asm/facility.h>
38#include <asm/ipl.h> 39#include <asm/ipl.h>
@@ -261,6 +262,8 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
261 + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); 262 + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
262 lc->thread_info = (unsigned long) task_thread_info(tsk); 263 lc->thread_info = (unsigned long) task_thread_info(tsk);
263 lc->current_task = (unsigned long) tsk; 264 lc->current_task = (unsigned long) tsk;
265 lc->lpp = LPP_MAGIC;
266 lc->current_pid = tsk->pid;
264 lc->user_timer = ti->user_timer; 267 lc->user_timer = ti->user_timer;
265 lc->system_timer = ti->system_timer; 268 lc->system_timer = ti->system_timer;
266 lc->steal_timer = 0; 269 lc->steal_timer = 0;
@@ -375,11 +378,14 @@ int smp_vcpu_scheduled(int cpu)
375 378
376void smp_yield_cpu(int cpu) 379void smp_yield_cpu(int cpu)
377{ 380{
378 if (MACHINE_HAS_DIAG9C) 381 if (MACHINE_HAS_DIAG9C) {
382 diag_stat_inc_norecursion(DIAG_STAT_X09C);
379 asm volatile("diag %0,0,0x9c" 383 asm volatile("diag %0,0,0x9c"
380 : : "d" (pcpu_devices[cpu].address)); 384 : : "d" (pcpu_devices[cpu].address));
381 else if (MACHINE_HAS_DIAG44) 385 } else if (MACHINE_HAS_DIAG44) {
386 diag_stat_inc_norecursion(DIAG_STAT_X044);
382 asm volatile("diag 0,0,0x44"); 387 asm volatile("diag 0,0,0x44");
388 }
383} 389}
384 390
385/* 391/*
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 017c3a9bfc28..99f84ac31307 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -542,16 +542,17 @@ arch_initcall(etr_init);
542 * Switch to local machine check. This is called when the last usable 542 * Switch to local machine check. This is called when the last usable
543 * ETR port goes inactive. After switch to local the clock is not in sync. 543 * ETR port goes inactive. After switch to local the clock is not in sync.
544 */ 544 */
545void etr_switch_to_local(void) 545int etr_switch_to_local(void)
546{ 546{
547 if (!etr_eacr.sl) 547 if (!etr_eacr.sl)
548 return; 548 return 0;
549 disable_sync_clock(NULL); 549 disable_sync_clock(NULL);
550 if (!test_and_set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events)) { 550 if (!test_and_set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events)) {
551 etr_eacr.es = etr_eacr.sl = 0; 551 etr_eacr.es = etr_eacr.sl = 0;
552 etr_setr(&etr_eacr); 552 etr_setr(&etr_eacr);
553 queue_work(time_sync_wq, &etr_work); 553 return 1;
554 } 554 }
555 return 0;
555} 556}
556 557
557/* 558/*
@@ -560,16 +561,22 @@ void etr_switch_to_local(void)
560 * After a ETR sync check the clock is not in sync. The machine check 561 * After a ETR sync check the clock is not in sync. The machine check
561 * is broadcasted to all cpus at the same time. 562 * is broadcasted to all cpus at the same time.
562 */ 563 */
563void etr_sync_check(void) 564int etr_sync_check(void)
564{ 565{
565 if (!etr_eacr.es) 566 if (!etr_eacr.es)
566 return; 567 return 0;
567 disable_sync_clock(NULL); 568 disable_sync_clock(NULL);
568 if (!test_and_set_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) { 569 if (!test_and_set_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) {
569 etr_eacr.es = 0; 570 etr_eacr.es = 0;
570 etr_setr(&etr_eacr); 571 etr_setr(&etr_eacr);
571 queue_work(time_sync_wq, &etr_work); 572 return 1;
572 } 573 }
574 return 0;
575}
576
577void etr_queue_work(void)
578{
579 queue_work(time_sync_wq, &etr_work);
573} 580}
574 581
575/* 582/*
@@ -1504,10 +1511,10 @@ static void stp_timing_alert(struct stp_irq_parm *intparm)
1504 * After a STP sync check the clock is not in sync. The machine check 1511 * After a STP sync check the clock is not in sync. The machine check
1505 * is broadcasted to all cpus at the same time. 1512 * is broadcasted to all cpus at the same time.
1506 */ 1513 */
1507void stp_sync_check(void) 1514int stp_sync_check(void)
1508{ 1515{
1509 disable_sync_clock(NULL); 1516 disable_sync_clock(NULL);
1510 queue_work(time_sync_wq, &stp_work); 1517 return 1;
1511} 1518}
1512 1519
1513/* 1520/*
@@ -1516,12 +1523,16 @@ void stp_sync_check(void)
1516 * have matching CTN ids and have a valid stratum-1 configuration 1523 * have matching CTN ids and have a valid stratum-1 configuration
1517 * but the configurations do not match. 1524 * but the configurations do not match.
1518 */ 1525 */
1519void stp_island_check(void) 1526int stp_island_check(void)
1520{ 1527{
1521 disable_sync_clock(NULL); 1528 disable_sync_clock(NULL);
1522 queue_work(time_sync_wq, &stp_work); 1529 return 1;
1523} 1530}
1524 1531
1532void stp_queue_work(void)
1533{
1534 queue_work(time_sync_wq, &stp_work);
1535}
1525 1536
1526static int stp_sync_clock(void *data) 1537static int stp_sync_clock(void *data)
1527{ 1538{
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index bf05e7fc3e70..40b8102fdadb 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -84,6 +84,7 @@ static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
84 struct mask_info *socket, 84 struct mask_info *socket,
85 int one_socket_per_cpu) 85 int one_socket_per_cpu)
86{ 86{
87 struct cpu_topology_s390 *topo;
87 unsigned int core; 88 unsigned int core;
88 89
89 for_each_set_bit(core, &tl_core->mask[0], TOPOLOGY_CORE_BITS) { 90 for_each_set_bit(core, &tl_core->mask[0], TOPOLOGY_CORE_BITS) {
@@ -95,15 +96,16 @@ static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
95 if (lcpu < 0) 96 if (lcpu < 0)
96 continue; 97 continue;
97 for (i = 0; i <= smp_cpu_mtid; i++) { 98 for (i = 0; i <= smp_cpu_mtid; i++) {
98 per_cpu(cpu_topology, lcpu + i).book_id = book->id; 99 topo = &per_cpu(cpu_topology, lcpu + i);
99 per_cpu(cpu_topology, lcpu + i).core_id = rcore; 100 topo->book_id = book->id;
100 per_cpu(cpu_topology, lcpu + i).thread_id = lcpu + i; 101 topo->core_id = rcore;
102 topo->thread_id = lcpu + i;
101 cpumask_set_cpu(lcpu + i, &book->mask); 103 cpumask_set_cpu(lcpu + i, &book->mask);
102 cpumask_set_cpu(lcpu + i, &socket->mask); 104 cpumask_set_cpu(lcpu + i, &socket->mask);
103 if (one_socket_per_cpu) 105 if (one_socket_per_cpu)
104 per_cpu(cpu_topology, lcpu + i).socket_id = rcore; 106 topo->socket_id = rcore;
105 else 107 else
106 per_cpu(cpu_topology, lcpu + i).socket_id = socket->id; 108 topo->socket_id = socket->id;
107 smp_cpu_set_polarization(lcpu + i, tl_core->pp); 109 smp_cpu_set_polarization(lcpu + i, tl_core->pp);
108 } 110 }
109 if (one_socket_per_cpu) 111 if (one_socket_per_cpu)
@@ -247,17 +249,19 @@ int topology_set_cpu_management(int fc)
247 249
248static void update_cpu_masks(void) 250static void update_cpu_masks(void)
249{ 251{
252 struct cpu_topology_s390 *topo;
250 int cpu; 253 int cpu;
251 254
252 for_each_possible_cpu(cpu) { 255 for_each_possible_cpu(cpu) {
253 per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu); 256 topo = &per_cpu(cpu_topology, cpu);
254 per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu); 257 topo->thread_mask = cpu_thread_map(cpu);
255 per_cpu(cpu_topology, cpu).book_mask = cpu_group_map(&book_info, cpu); 258 topo->core_mask = cpu_group_map(&socket_info, cpu);
259 topo->book_mask = cpu_group_map(&book_info, cpu);
256 if (!MACHINE_HAS_TOPOLOGY) { 260 if (!MACHINE_HAS_TOPOLOGY) {
257 per_cpu(cpu_topology, cpu).thread_id = cpu; 261 topo->thread_id = cpu;
258 per_cpu(cpu_topology, cpu).core_id = cpu; 262 topo->core_id = cpu;
259 per_cpu(cpu_topology, cpu).socket_id = cpu; 263 topo->socket_id = cpu;
260 per_cpu(cpu_topology, cpu).book_id = cpu; 264 topo->book_id = cpu;
261 } 265 }
262 } 266 }
263 numa_update_cpu_topology(); 267 numa_update_cpu_topology();
diff --git a/arch/s390/kernel/trace.c b/arch/s390/kernel/trace.c
new file mode 100644
index 000000000000..73239bb576c4
--- /dev/null
+++ b/arch/s390/kernel/trace.c
@@ -0,0 +1,29 @@
1/*
2 * Tracepoint definitions for s390
3 *
4 * Copyright IBM Corp. 2015
5 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
6 */
7
8#include <linux/percpu.h>
9#define CREATE_TRACE_POINTS
10#include <asm/trace/diag.h>
11
12EXPORT_TRACEPOINT_SYMBOL(diagnose);
13
14static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth);
15
16void trace_diagnose_norecursion(int diag_nr)
17{
18 unsigned long flags;
19 unsigned int *depth;
20
21 local_irq_save(flags);
22 depth = this_cpu_ptr(&diagnose_trace_depth);
23 if (*depth == 0) {
24 (*depth)++;
25 trace_diagnose(diag_nr);
26 (*depth)--;
27 }
28 local_irq_restore(flags);
29}
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 9861613fb35a..1b18118bbc06 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -19,7 +19,7 @@
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/mm.h> 20#include <linux/mm.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <asm/fpu-internal.h> 22#include <asm/fpu/api.h>
23#include "entry.h" 23#include "entry.h"
24 24
25int show_unhandled_signals = 1; 25int show_unhandled_signals = 1;
@@ -224,29 +224,6 @@ NOKPROBE_SYMBOL(illegal_op);
224DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, 224DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
225 "specification exception"); 225 "specification exception");
226 226
227int alloc_vector_registers(struct task_struct *tsk)
228{
229 __vector128 *vxrs;
230 freg_t *fprs;
231
232 /* Allocate vector register save area. */
233 vxrs = kzalloc(sizeof(__vector128) * __NUM_VXRS,
234 GFP_KERNEL|__GFP_REPEAT);
235 if (!vxrs)
236 return -ENOMEM;
237 preempt_disable();
238 if (tsk == current)
239 save_fpu_regs();
240 /* Copy the 16 floating point registers */
241 convert_fp_to_vx(vxrs, tsk->thread.fpu.fprs);
242 fprs = tsk->thread.fpu.fprs;
243 tsk->thread.fpu.vxrs = vxrs;
244 tsk->thread.fpu.flags |= FPU_USE_VX;
245 kfree(fprs);
246 preempt_enable();
247 return 0;
248}
249
250void vector_exception(struct pt_regs *regs) 227void vector_exception(struct pt_regs *regs)
251{ 228{
252 int si_code, vic; 229 int si_code, vic;
@@ -281,13 +258,6 @@ void vector_exception(struct pt_regs *regs)
281 do_trap(regs, SIGFPE, si_code, "vector exception"); 258 do_trap(regs, SIGFPE, si_code, "vector exception");
282} 259}
283 260
284static int __init disable_vector_extension(char *str)
285{
286 S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
287 return 1;
288}
289__setup("novx", disable_vector_extension);
290
291void data_exception(struct pt_regs *regs) 261void data_exception(struct pt_regs *regs)
292{ 262{
293 __u16 __user *location; 263 __u16 __user *location;
@@ -296,15 +266,6 @@ void data_exception(struct pt_regs *regs)
296 location = get_trap_ip(regs); 266 location = get_trap_ip(regs);
297 267
298 save_fpu_regs(); 268 save_fpu_regs();
299 /* Check for vector register enablement */
300 if (MACHINE_HAS_VX && !is_vx_task(current) &&
301 (current->thread.fpu.fpc & FPC_DXC_MASK) == 0xfe00) {
302 alloc_vector_registers(current);
303 /* Vector data exception is suppressing, rewind psw. */
304 regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
305 clear_pt_regs_flag(regs, PIF_PER_TRAP);
306 return;
307 }
308 if (current->thread.fpu.fpc & FPC_DXC_MASK) 269 if (current->thread.fpu.fpc & FPC_DXC_MASK)
309 signal = SIGFPE; 270 signal = SIGFPE;
310 else 271 else
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 0d58269ff425..59eddb0e1a3e 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -299,7 +299,7 @@ static int __init vdso_init(void)
299 299
300 get_page(virt_to_page(vdso_data)); 300 get_page(virt_to_page(vdso_data));
301 301
302 smp_wmb(); 302 smp_mb();
303 303
304 return 0; 304 return 0;
305} 305}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0a67c40eece9..c6b4063fce29 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1292,7 +1292,6 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1292static inline void save_fpu_to(struct fpu *dst) 1292static inline void save_fpu_to(struct fpu *dst)
1293{ 1293{
1294 dst->fpc = current->thread.fpu.fpc; 1294 dst->fpc = current->thread.fpu.fpc;
1295 dst->flags = current->thread.fpu.flags;
1296 dst->regs = current->thread.fpu.regs; 1295 dst->regs = current->thread.fpu.regs;
1297} 1296}
1298 1297
@@ -1303,7 +1302,6 @@ static inline void save_fpu_to(struct fpu *dst)
1303static inline void load_fpu_from(struct fpu *from) 1302static inline void load_fpu_from(struct fpu *from)
1304{ 1303{
1305 current->thread.fpu.fpc = from->fpc; 1304 current->thread.fpu.fpc = from->fpc;
1306 current->thread.fpu.flags = from->flags;
1307 current->thread.fpu.regs = from->regs; 1305 current->thread.fpu.regs = from->regs;
1308} 1306}
1309 1307
@@ -1315,15 +1313,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1315 1313
1316 if (test_kvm_facility(vcpu->kvm, 129)) { 1314 if (test_kvm_facility(vcpu->kvm, 129)) {
1317 current->thread.fpu.fpc = vcpu->run->s.regs.fpc; 1315 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
1318 current->thread.fpu.flags = FPU_USE_VX;
1319 /* 1316 /*
1320 * Use the register save area in the SIE-control block 1317 * Use the register save area in the SIE-control block
1321 * for register restore and save in kvm_arch_vcpu_put() 1318 * for register restore and save in kvm_arch_vcpu_put()
1322 */ 1319 */
1323 current->thread.fpu.vxrs = 1320 current->thread.fpu.vxrs =
1324 (__vector128 *)&vcpu->run->s.regs.vrs; 1321 (__vector128 *)&vcpu->run->s.regs.vrs;
1325 /* Always enable the vector extension for KVM */
1326 __ctl_set_vx();
1327 } else 1322 } else
1328 load_fpu_from(&vcpu->arch.guest_fpregs); 1323 load_fpu_from(&vcpu->arch.guest_fpregs);
1329 1324
@@ -2326,7 +2321,6 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2326 * registers and the FPC value and store them in the 2321 * registers and the FPC value and store them in the
2327 * guest_fpregs structure. 2322 * guest_fpregs structure.
2328 */ 2323 */
2329 WARN_ON(!is_vx_task(current)); /* XXX remove later */
2330 vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc; 2324 vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc;
2331 convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs, 2325 convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs,
2332 current->thread.fpu.vxrs); 2326 current->thread.fpu.vxrs);
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 246a7eb4b680..501dcd4ca4a0 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -12,8 +12,10 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/irqflags.h> 13#include <linux/irqflags.h>
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/irq.h>
15#include <asm/vtimer.h> 16#include <asm/vtimer.h>
16#include <asm/div64.h> 17#include <asm/div64.h>
18#include <asm/idle.h>
17 19
18void __delay(unsigned long loops) 20void __delay(unsigned long loops)
19{ 21{
@@ -30,26 +32,22 @@ EXPORT_SYMBOL(__delay);
30 32
31static void __udelay_disabled(unsigned long long usecs) 33static void __udelay_disabled(unsigned long long usecs)
32{ 34{
33 unsigned long cr0, cr6, new; 35 unsigned long cr0, cr0_new, psw_mask;
34 u64 clock_saved, end; 36 struct s390_idle_data idle;
37 u64 end;
35 38
36 end = get_tod_clock() + (usecs << 12); 39 end = get_tod_clock() + (usecs << 12);
37 clock_saved = local_tick_disable();
38 __ctl_store(cr0, 0, 0); 40 __ctl_store(cr0, 0, 0);
39 __ctl_store(cr6, 6, 6); 41 cr0_new = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
40 new = (cr0 & 0xffff00e0) | 0x00000800; 42 cr0_new |= (1UL << (63 - 52)); /* enable clock comparator irq */
41 __ctl_load(new , 0, 0); 43 __ctl_load(cr0_new, 0, 0);
42 new = 0; 44 psw_mask = __extract_psw() | PSW_MASK_EXT | PSW_MASK_WAIT;
43 __ctl_load(new, 6, 6); 45 set_clock_comparator(end);
44 lockdep_off(); 46 set_cpu_flag(CIF_IGNORE_IRQ);
45 do { 47 psw_idle(&idle, psw_mask);
46 set_clock_comparator(end); 48 clear_cpu_flag(CIF_IGNORE_IRQ);
47 enabled_wait(); 49 set_clock_comparator(S390_lowcore.clock_comparator);
48 } while (get_tod_clock_fast() < end);
49 lockdep_on();
50 __ctl_load(cr0, 0, 0); 50 __ctl_load(cr0, 0, 0);
51 __ctl_load(cr6, 6, 6);
52 local_tick_enable(clock_saved);
53} 51}
54 52
55static void __udelay_enabled(unsigned long long usecs) 53static void __udelay_enabled(unsigned long long usecs)
diff --git a/arch/s390/lib/find.c b/arch/s390/lib/find.c
index 922003c1b90d..d90b9245ea41 100644
--- a/arch/s390/lib/find.c
+++ b/arch/s390/lib/find.c
@@ -1,10 +1,8 @@
1/* 1/*
2 * MSB0 numbered special bitops handling. 2 * MSB0 numbered special bitops handling.
3 * 3 *
4 * On s390x the bits are numbered: 4 * The bits are numbered:
5 * |0..............63|64............127|128...........191|192...........255| 5 * |0..............63|64............127|128...........191|192...........255|
6 * and on s390:
7 * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
8 * 6 *
9 * The reason for this bit numbering is the fact that the hardware sets bits 7 * The reason for this bit numbering is the fact that the hardware sets bits
10 * in a bitmap starting at bit 0 (MSB) and we don't want to scan the bitmap 8 * in a bitmap starting at bit 0 (MSB) and we don't want to scan the bitmap
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index d6c9991f7797..427aa44b3505 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -197,7 +197,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
197 } 197 }
198 old = ACCESS_ONCE(rw->lock); 198 old = ACCESS_ONCE(rw->lock);
199 owner = ACCESS_ONCE(rw->owner); 199 owner = ACCESS_ONCE(rw->owner);
200 smp_rmb(); 200 smp_mb();
201 if ((int) old >= 0) { 201 if ((int) old >= 0) {
202 prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR); 202 prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
203 old = prev; 203 old = prev;
@@ -231,7 +231,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
231 _raw_compare_and_swap(&rw->lock, old, old | 0x80000000)) 231 _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
232 prev = old; 232 prev = old;
233 else 233 else
234 smp_rmb(); 234 smp_mb();
235 if ((old & 0x7fffffff) == 0 && (int) prev >= 0) 235 if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
236 break; 236 break;
237 if (MACHINE_HAS_CAD) 237 if (MACHINE_HAS_CAD)
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 23c496957c22..18fccc303db7 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -18,6 +18,7 @@
18#include <linux/bootmem.h> 18#include <linux/bootmem.h>
19#include <linux/ctype.h> 19#include <linux/ctype.h>
20#include <linux/ioport.h> 20#include <linux/ioport.h>
21#include <asm/diag.h>
21#include <asm/page.h> 22#include <asm/page.h>
22#include <asm/pgtable.h> 23#include <asm/pgtable.h>
23#include <asm/ebcdic.h> 24#include <asm/ebcdic.h>
@@ -112,6 +113,7 @@ dcss_set_subcodes(void)
112 ry = DCSS_FINDSEGX; 113 ry = DCSS_FINDSEGX;
113 114
114 strcpy(name, "dummy"); 115 strcpy(name, "dummy");
116 diag_stat_inc(DIAG_STAT_X064);
115 asm volatile( 117 asm volatile(
116 " diag %0,%1,0x64\n" 118 " diag %0,%1,0x64\n"
117 "0: ipm %2\n" 119 "0: ipm %2\n"
@@ -205,6 +207,7 @@ dcss_diag(int *func, void *parameter,
205 ry = (unsigned long) *func; 207 ry = (unsigned long) *func;
206 208
207 /* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */ 209 /* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */
210 diag_stat_inc(DIAG_STAT_X064);
208 if (*func > DCSS_SEGEXT) 211 if (*func > DCSS_SEGEXT)
209 asm volatile( 212 asm volatile(
210 " diag %0,%1,0x64\n" 213 " diag %0,%1,0x64\n"
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index f985856a538b..ec1a30d0d11a 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -30,6 +30,7 @@
30#include <linux/uaccess.h> 30#include <linux/uaccess.h>
31#include <linux/hugetlb.h> 31#include <linux/hugetlb.h>
32#include <asm/asm-offsets.h> 32#include <asm/asm-offsets.h>
33#include <asm/diag.h>
33#include <asm/pgtable.h> 34#include <asm/pgtable.h>
34#include <asm/irq.h> 35#include <asm/irq.h>
35#include <asm/mmu_context.h> 36#include <asm/mmu_context.h>
@@ -589,7 +590,7 @@ int pfault_init(void)
589 .reffcode = 0, 590 .reffcode = 0,
590 .refdwlen = 5, 591 .refdwlen = 5,
591 .refversn = 2, 592 .refversn = 2,
592 .refgaddr = __LC_CURRENT_PID, 593 .refgaddr = __LC_LPP,
593 .refselmk = 1ULL << 48, 594 .refselmk = 1ULL << 48,
594 .refcmpmk = 1ULL << 48, 595 .refcmpmk = 1ULL << 48,
595 .reserved = __PF_RES_FIELD }; 596 .reserved = __PF_RES_FIELD };
@@ -597,6 +598,7 @@ int pfault_init(void)
597 598
598 if (pfault_disable) 599 if (pfault_disable)
599 return -1; 600 return -1;
601 diag_stat_inc(DIAG_STAT_X258);
600 asm volatile( 602 asm volatile(
601 " diag %1,%0,0x258\n" 603 " diag %1,%0,0x258\n"
602 "0: j 2f\n" 604 "0: j 2f\n"
@@ -618,6 +620,7 @@ void pfault_fini(void)
618 620
619 if (pfault_disable) 621 if (pfault_disable)
620 return; 622 return;
623 diag_stat_inc(DIAG_STAT_X258);
621 asm volatile( 624 asm volatile(
622 " diag %0,0,0x258\n" 625 " diag %0,0,0x258\n"
623 "0:\n" 626 "0:\n"
@@ -646,7 +649,7 @@ static void pfault_interrupt(struct ext_code ext_code,
646 return; 649 return;
647 inc_irq_stat(IRQEXT_PFL); 650 inc_irq_stat(IRQEXT_PFL);
648 /* Get the token (= pid of the affected task). */ 651 /* Get the token (= pid of the affected task). */
649 pid = param64; 652 pid = param64 & LPP_PFAULT_PID_MASK;
650 rcu_read_lock(); 653 rcu_read_lock();
651 tsk = find_task_by_pid_ns(pid, &init_pid_ns); 654 tsk = find_task_by_pid_ns(pid, &init_pid_ns);
652 if (tsk) 655 if (tsk)
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index fb4bf2c4379e..f81096b6940d 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -40,6 +40,7 @@ static inline pmd_t __pte_to_pmd(pte_t pte)
40 pmd_val(pmd) |= (pte_val(pte) & _PAGE_PROTECT); 40 pmd_val(pmd) |= (pte_val(pte) & _PAGE_PROTECT);
41 pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10; 41 pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10;
42 pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10; 42 pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10;
43 pmd_val(pmd) |= (pte_val(pte) & _PAGE_SOFT_DIRTY) << 13;
43 } else 44 } else
44 pmd_val(pmd) = _SEGMENT_ENTRY_INVALID; 45 pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
45 return pmd; 46 return pmd;
@@ -78,6 +79,7 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
78 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT); 79 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT);
79 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) >> 10; 80 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) >> 10;
80 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) >> 10; 81 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) >> 10;
82 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY) >> 13;
81 } else 83 } else
82 pte_val(pte) = _PAGE_INVALID; 84 pte_val(pte) = _PAGE_INVALID;
83 return pte; 85 return pte;
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index 30b2698a28e2..828d0695d0d4 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -436,9 +436,15 @@ static void emu_update_cpu_topology(void)
436 */ 436 */
437static unsigned long emu_setup_size_adjust(unsigned long size) 437static unsigned long emu_setup_size_adjust(unsigned long size)
438{ 438{
439 unsigned long size_new;
440
439 size = size ? : CONFIG_EMU_SIZE; 441 size = size ? : CONFIG_EMU_SIZE;
440 size = roundup(size, memory_block_size_bytes()); 442 size_new = roundup(size, memory_block_size_bytes());
441 return size; 443 if (size_new == size)
444 return size;
445 pr_warn("Increasing memory stripe size from %ld MB to %ld MB\n",
446 size >> 20, size_new >> 20);
447 return size_new;
442} 448}
443 449
444/* 450/*
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index dcc2634ccbe2..10ca15dcab11 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -16,11 +16,11 @@
16static inline void zpci_err_insn(u8 cc, u8 status, u64 req, u64 offset) 16static inline void zpci_err_insn(u8 cc, u8 status, u64 req, u64 offset)
17{ 17{
18 struct { 18 struct {
19 u8 cc;
20 u8 status;
21 u64 req; 19 u64 req;
22 u64 offset; 20 u64 offset;
23 } data = {cc, status, req, offset}; 21 u8 cc;
22 u8 status;
23 } __packed data = {req, offset, cc, status};
24 24
25 zpci_err_hex(&data, sizeof(data)); 25 zpci_err_hex(&data, sizeof(data));
26} 26}
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index c0b41f111a9a..6ec0c8b2e9df 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -325,6 +325,16 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
325 return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); 325 return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
326} 326}
327 327
328static inline pte_t pte_clear_soft_dirty(pte_t pte)
329{
330 return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
331}
332
333static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
334{
335 return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
336}
337
328#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */ 338#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
329 339
330/* 340/*
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index f73d2f579a7e..a263c10359e1 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3030,6 +3030,7 @@ static void dasd_setup_queue(struct dasd_block *block)
3030 } else { 3030 } else {
3031 max = block->base->discipline->max_blocks << block->s2b_shift; 3031 max = block->base->discipline->max_blocks << block->s2b_shift;
3032 } 3032 }
3033 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue);
3033 blk_queue_logical_block_size(block->request_queue, 3034 blk_queue_logical_block_size(block->request_queue,
3034 block->bp_block); 3035 block->bp_block);
3035 blk_queue_max_hw_sectors(block->request_queue, max); 3036 blk_queue_max_hw_sectors(block->request_queue, max);
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index fe07f3139bf6..184b1dbeb554 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -824,8 +824,11 @@ static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
824 * were waiting for the flush 824 * were waiting for the flush
825 */ 825 */
826 if (device == list_first_entry(&active, 826 if (device == list_first_entry(&active,
827 struct dasd_device, alias_list)) 827 struct dasd_device, alias_list)) {
828 list_move(&device->alias_list, &lcu->active_devices); 828 list_move(&device->alias_list, &lcu->active_devices);
829 private = (struct dasd_eckd_private *) device->private;
830 private->pavgroup = NULL;
831 }
829 } 832 }
830 spin_unlock_irqrestore(&lcu->lock, flags); 833 spin_unlock_irqrestore(&lcu->lock, flags);
831} 834}
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index c062f1620c58..cb61f300f8b5 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -21,6 +21,7 @@
21 21
22#include <asm/dasd.h> 22#include <asm/dasd.h>
23#include <asm/debug.h> 23#include <asm/debug.h>
24#include <asm/diag.h>
24#include <asm/ebcdic.h> 25#include <asm/ebcdic.h>
25#include <asm/io.h> 26#include <asm/io.h>
26#include <asm/irq.h> 27#include <asm/irq.h>
@@ -76,6 +77,7 @@ static inline int dia250(void *iob, int cmd)
76 int rc; 77 int rc;
77 78
78 rc = 3; 79 rc = 3;
80 diag_stat_inc(DIAG_STAT_X250);
79 asm volatile( 81 asm volatile(
80 " diag 2,%2,0x250\n" 82 " diag 2,%2,0x250\n"
81 "0: ipm %0\n" 83 "0: ipm %0\n"
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 62a323539226..9083247f55a8 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1032,6 +1032,21 @@ static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
1032 return 0; 1032 return 0;
1033} 1033}
1034 1034
1035static void dasd_eckd_clear_conf_data(struct dasd_device *device)
1036{
1037 struct dasd_eckd_private *private;
1038 int i;
1039
1040 private = (struct dasd_eckd_private *) device->private;
1041 private->conf_data = NULL;
1042 private->conf_len = 0;
1043 for (i = 0; i < 8; i++) {
1044 kfree(private->path_conf_data[i]);
1045 private->path_conf_data[i] = NULL;
1046 }
1047}
1048
1049
1035static int dasd_eckd_read_conf(struct dasd_device *device) 1050static int dasd_eckd_read_conf(struct dasd_device *device)
1036{ 1051{
1037 void *conf_data; 1052 void *conf_data;
@@ -1068,20 +1083,10 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
1068 path_data->opm |= lpm; 1083 path_data->opm |= lpm;
1069 continue; /* no error */ 1084 continue; /* no error */
1070 } 1085 }
1071 /* translate path mask to position in mask */
1072 pos = 8 - ffs(lpm);
1073 kfree(private->path_conf_data[pos]);
1074 if ((__u8 *)private->path_conf_data[pos] ==
1075 private->conf_data) {
1076 private->conf_data = NULL;
1077 private->conf_len = 0;
1078 conf_data_saved = 0;
1079 }
1080 private->path_conf_data[pos] =
1081 (struct dasd_conf_data *) conf_data;
1082 /* save first valid configuration data */ 1086 /* save first valid configuration data */
1083 if (!conf_data_saved) { 1087 if (!conf_data_saved) {
1084 kfree(private->conf_data); 1088 /* initially clear previously stored conf_data */
1089 dasd_eckd_clear_conf_data(device);
1085 private->conf_data = conf_data; 1090 private->conf_data = conf_data;
1086 private->conf_len = conf_len; 1091 private->conf_len = conf_len;
1087 if (dasd_eckd_identify_conf_parts(private)) { 1092 if (dasd_eckd_identify_conf_parts(private)) {
@@ -1090,6 +1095,10 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
1090 kfree(conf_data); 1095 kfree(conf_data);
1091 continue; 1096 continue;
1092 } 1097 }
1098 pos = pathmask_to_pos(lpm);
1099 /* store per path conf_data */
1100 private->path_conf_data[pos] =
1101 (struct dasd_conf_data *) conf_data;
1093 /* 1102 /*
1094 * build device UID that other path data 1103 * build device UID that other path data
1095 * can be compared to it 1104 * can be compared to it
@@ -1147,7 +1156,10 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
1147 path_data->cablepm |= lpm; 1156 path_data->cablepm |= lpm;
1148 continue; 1157 continue;
1149 } 1158 }
1150 1159 pos = pathmask_to_pos(lpm);
1160 /* store per path conf_data */
1161 private->path_conf_data[pos] =
1162 (struct dasd_conf_data *) conf_data;
1151 path_private.conf_data = NULL; 1163 path_private.conf_data = NULL;
1152 path_private.conf_len = 0; 1164 path_private.conf_len = 0;
1153 } 1165 }
@@ -1159,7 +1171,12 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
1159 path_data->ppm |= lpm; 1171 path_data->ppm |= lpm;
1160 break; 1172 break;
1161 } 1173 }
1162 path_data->opm |= lpm; 1174 if (!path_data->opm) {
1175 path_data->opm = lpm;
1176 dasd_generic_path_operational(device);
1177 } else {
1178 path_data->opm |= lpm;
1179 }
1163 /* 1180 /*
1164 * if the path is used 1181 * if the path is used
1165 * it should not be in one of the negative lists 1182 * it should not be in one of the negative lists
@@ -4423,7 +4440,12 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
4423 private = (struct dasd_eckd_private *) device->private; 4440 private = (struct dasd_eckd_private *) device->private;
4424 4441
4425 /* Read Configuration Data */ 4442 /* Read Configuration Data */
4426 dasd_eckd_read_conf(device); 4443 rc = dasd_eckd_read_conf(device);
4444 if (rc) {
4445 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
4446 "Read configuration data failed, rc=%d", rc);
4447 goto out_err;
4448 }
4427 4449
4428 dasd_eckd_get_uid(device, &temp_uid); 4450 dasd_eckd_get_uid(device, &temp_uid);
4429 /* Generate device unique id */ 4451 /* Generate device unique id */
@@ -4439,13 +4461,18 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
4439 /* register lcu with alias handling, enable PAV if this is a new lcu */ 4461 /* register lcu with alias handling, enable PAV if this is a new lcu */
4440 rc = dasd_alias_make_device_known_to_lcu(device); 4462 rc = dasd_alias_make_device_known_to_lcu(device);
4441 if (rc) 4463 if (rc)
4442 return rc; 4464 goto out_err;
4443 4465
4444 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr_flags); 4466 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr_flags);
4445 dasd_eckd_validate_server(device, cqr_flags); 4467 dasd_eckd_validate_server(device, cqr_flags);
4446 4468
4447 /* RE-Read Configuration Data */ 4469 /* RE-Read Configuration Data */
4448 dasd_eckd_read_conf(device); 4470 rc = dasd_eckd_read_conf(device);
4471 if (rc) {
4472 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
4473 "Read configuration data failed, rc=%d", rc);
4474 goto out_err2;
4475 }
4449 4476
4450 /* Read Feature Codes */ 4477 /* Read Feature Codes */
4451 dasd_eckd_read_features(device); 4478 dasd_eckd_read_features(device);
@@ -4456,7 +4483,7 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
4456 if (rc) { 4483 if (rc) {
4457 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, 4484 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
4458 "Read device characteristic failed, rc=%d", rc); 4485 "Read device characteristic failed, rc=%d", rc);
4459 goto out_err; 4486 goto out_err2;
4460 } 4487 }
4461 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 4488 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
4462 memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data)); 4489 memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
@@ -4467,6 +4494,8 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
4467 4494
4468 return 0; 4495 return 0;
4469 4496
4497out_err2:
4498 dasd_alias_disconnect_device_from_lcu(device);
4470out_err: 4499out_err:
4471 return -1; 4500 return -1;
4472} 4501}
@@ -4671,7 +4700,7 @@ static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
4671 return conf_data; 4700 return conf_data;
4672 } 4701 }
4673out: 4702out:
4674 return private->path_conf_data[8 - ffs(lpum)]; 4703 return private->path_conf_data[pathmask_to_pos(lpum)];
4675} 4704}
4676 4705
4677/* 4706/*
@@ -4716,7 +4745,7 @@ static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
4716 for (path = 0x80; path; path >>= 1) { 4745 for (path = 0x80; path; path >>= 1) {
4717 /* initialise data per path */ 4746 /* initialise data per path */
4718 bitmask = mask; 4747 bitmask = mask;
4719 pos = 8 - ffs(path); 4748 pos = pathmask_to_pos(path);
4720 conf_data = private->path_conf_data[pos]; 4749 conf_data = private->path_conf_data[pos];
4721 pos = 8 - ffs(cuir->ned_map); 4750 pos = 8 - ffs(cuir->ned_map);
4722 ned = (char *) &conf_data->neds[pos]; 4751 ned = (char *) &conf_data->neds[pos];
@@ -4937,9 +4966,7 @@ static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
4937 ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2], 4966 ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
4938 ((u32 *)cuir)[3]); 4967 ((u32 *)cuir)[3]);
4939 ccw_device_get_schid(device->cdev, &sch_id); 4968 ccw_device_get_schid(device->cdev, &sch_id);
4940 /* get position of path in mask */ 4969 pos = pathmask_to_pos(lpum);
4941 pos = 8 - ffs(lpum);
4942 /* get channel path descriptor from this position */
4943 desc = ccw_device_get_chp_desc(device->cdev, pos); 4970 desc = ccw_device_get_chp_desc(device->cdev, pos);
4944 4971
4945 if (cuir->code == CUIR_QUIESCE) { 4972 if (cuir->code == CUIR_QUIESCE) {
diff --git a/drivers/s390/char/diag_ftp.c b/drivers/s390/char/diag_ftp.c
index 12db8db04cdd..a5ccbf6f0d36 100644
--- a/drivers/s390/char/diag_ftp.c
+++ b/drivers/s390/char/diag_ftp.c
@@ -15,6 +15,7 @@
15#include <linux/wait.h> 15#include <linux/wait.h>
16#include <linux/string.h> 16#include <linux/string.h>
17#include <asm/ctl_reg.h> 17#include <asm/ctl_reg.h>
18#include <asm/diag.h>
18 19
19#include "hmcdrv_ftp.h" 20#include "hmcdrv_ftp.h"
20#include "diag_ftp.h" 21#include "diag_ftp.h"
@@ -102,6 +103,7 @@ static int diag_ftp_2c4(struct diag_ftp_ldfpl *fpl,
102{ 103{
103 int rc; 104 int rc;
104 105
106 diag_stat_inc(DIAG_STAT_X2C4);
105 asm volatile( 107 asm volatile(
106 " diag %[addr],%[cmd],0x2c4\n" 108 " diag %[addr],%[cmd],0x2c4\n"
107 "0: j 2f\n" 109 "0: j 2f\n"
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index 35a84af875ee..6010cd347a08 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -47,9 +47,9 @@ struct sclp_buffer *
47sclp_make_buffer(void *page, unsigned short columns, unsigned short htab) 47sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
48{ 48{
49 struct sclp_buffer *buffer; 49 struct sclp_buffer *buffer;
50 struct write_sccb *sccb; 50 struct sccb_header *sccb;
51 51
52 sccb = (struct write_sccb *) page; 52 sccb = (struct sccb_header *) page;
53 /* 53 /*
54 * We keep the struct sclp_buffer structure at the end 54 * We keep the struct sclp_buffer structure at the end
55 * of the sccb page. 55 * of the sccb page.
@@ -57,24 +57,16 @@ sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
57 buffer = ((struct sclp_buffer *) ((addr_t) sccb + PAGE_SIZE)) - 1; 57 buffer = ((struct sclp_buffer *) ((addr_t) sccb + PAGE_SIZE)) - 1;
58 buffer->sccb = sccb; 58 buffer->sccb = sccb;
59 buffer->retry_count = 0; 59 buffer->retry_count = 0;
60 buffer->mto_number = 0; 60 buffer->messages = 0;
61 buffer->mto_char_sum = 0; 61 buffer->char_sum = 0;
62 buffer->current_line = NULL; 62 buffer->current_line = NULL;
63 buffer->current_length = 0; 63 buffer->current_length = 0;
64 buffer->columns = columns; 64 buffer->columns = columns;
65 buffer->htab = htab; 65 buffer->htab = htab;
66 66
67 /* initialize sccb */ 67 /* initialize sccb */
68 memset(sccb, 0, sizeof(struct write_sccb)); 68 memset(sccb, 0, sizeof(struct sccb_header));
69 sccb->header.length = sizeof(struct write_sccb); 69 sccb->length = sizeof(struct sccb_header);
70 sccb->msg_buf.header.length = sizeof(struct msg_buf);
71 sccb->msg_buf.header.type = EVTYP_MSG;
72 sccb->msg_buf.mdb.header.length = sizeof(struct mdb);
73 sccb->msg_buf.mdb.header.type = 1;
74 sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */
75 sccb->msg_buf.mdb.header.revision_code = 1;
76 sccb->msg_buf.mdb.go.length = sizeof(struct go);
77 sccb->msg_buf.mdb.go.type = 1;
78 70
79 return buffer; 71 return buffer;
80} 72}
@@ -90,37 +82,49 @@ sclp_unmake_buffer(struct sclp_buffer *buffer)
90} 82}
91 83
92/* 84/*
93 * Initialize a new Message Text Object (MTO) at the end of the provided buffer 85 * Initialize a new message the end of the provided buffer with
94 * with enough room for max_len characters. Return 0 on success. 86 * enough room for max_len characters. Return 0 on success.
95 */ 87 */
96static int 88static int
97sclp_initialize_mto(struct sclp_buffer *buffer, int max_len) 89sclp_initialize_mto(struct sclp_buffer *buffer, int max_len)
98{ 90{
99 struct write_sccb *sccb; 91 struct sccb_header *sccb;
92 struct msg_buf *msg;
93 struct mdb *mdb;
94 struct go *go;
100 struct mto *mto; 95 struct mto *mto;
101 int mto_size; 96 int msg_size;
102 97
103 /* max size of new Message Text Object including message text */ 98 /* max size of new message including message text */
104 mto_size = sizeof(struct mto) + max_len; 99 msg_size = sizeof(struct msg_buf) + max_len;
105 100
106 /* check if current buffer sccb can contain the mto */ 101 /* check if current buffer sccb can contain the mto */
107 sccb = buffer->sccb; 102 sccb = buffer->sccb;
108 if ((MAX_SCCB_ROOM - sccb->header.length) < mto_size) 103 if ((MAX_SCCB_ROOM - sccb->length) < msg_size)
109 return -ENOMEM; 104 return -ENOMEM;
110 105
111 /* find address of new message text object */ 106 msg = (struct msg_buf *)((addr_t) sccb + sccb->length);
112 mto = (struct mto *)(((addr_t) sccb) + sccb->header.length); 107 memset(msg, 0, sizeof(struct msg_buf));
108 msg->header.length = sizeof(struct msg_buf);
109 msg->header.type = EVTYP_MSG;
113 110
114 /* 111 mdb = &msg->mdb;
115 * fill the new Message-Text Object, 112 mdb->header.length = sizeof(struct mdb);
116 * starting behind the former last byte of the SCCB 113 mdb->header.type = 1;
117 */ 114 mdb->header.tag = 0xD4C4C240; /* ebcdic "MDB " */
118 memset(mto, 0, sizeof(struct mto)); 115 mdb->header.revision_code = 1;
116
117 go = &mdb->go;
118 go->length = sizeof(struct go);
119 go->type = 1;
120
121 mto = &mdb->mto;
119 mto->length = sizeof(struct mto); 122 mto->length = sizeof(struct mto);
120 mto->type = 4; /* message text object */ 123 mto->type = 4; /* message text object */
121 mto->line_type_flags = LNTPFLGS_ENDTEXT; /* end text */ 124 mto->line_type_flags = LNTPFLGS_ENDTEXT; /* end text */
122 125
123 /* set pointer to first byte after struct mto. */ 126 /* set pointer to first byte after struct mto. */
127 buffer->current_msg = msg;
124 buffer->current_line = (char *) (mto + 1); 128 buffer->current_line = (char *) (mto + 1);
125 buffer->current_length = 0; 129 buffer->current_length = 0;
126 130
@@ -128,45 +132,37 @@ sclp_initialize_mto(struct sclp_buffer *buffer, int max_len)
128} 132}
129 133
130/* 134/*
131 * Finalize MTO initialized by sclp_initialize_mto(), updating the sizes of 135 * Finalize message initialized by sclp_initialize_mto(),
132 * MTO, enclosing MDB, event buffer and SCCB. 136 * updating the sizes of MTO, enclosing MDB, event buffer and SCCB.
133 */ 137 */
134static void 138static void
135sclp_finalize_mto(struct sclp_buffer *buffer) 139sclp_finalize_mto(struct sclp_buffer *buffer)
136{ 140{
137 struct write_sccb *sccb; 141 struct sccb_header *sccb;
138 struct mto *mto; 142 struct msg_buf *msg;
139 int str_len, mto_size;
140
141 str_len = buffer->current_length;
142 buffer->current_line = NULL;
143 buffer->current_length = 0;
144
145 /* real size of new Message Text Object including message text */
146 mto_size = sizeof(struct mto) + str_len;
147
148 /* find address of new message text object */
149 sccb = buffer->sccb;
150 mto = (struct mto *)(((addr_t) sccb) + sccb->header.length);
151
152 /* set size of message text object */
153 mto->length = mto_size;
154 143
155 /* 144 /*
156 * update values of sizes 145 * update values of sizes
157 * (SCCB, Event(Message) Buffer, Message Data Block) 146 * (SCCB, Event(Message) Buffer, Message Data Block)
158 */ 147 */
159 sccb->header.length += mto_size; 148 sccb = buffer->sccb;
160 sccb->msg_buf.header.length += mto_size; 149 msg = buffer->current_msg;
161 sccb->msg_buf.mdb.header.length += mto_size; 150 msg->header.length += buffer->current_length;
151 msg->mdb.header.length += buffer->current_length;
152 msg->mdb.mto.length += buffer->current_length;
153 sccb->length += msg->header.length;
162 154
163 /* 155 /*
164 * count number of buffered messages (= number of Message Text 156 * count number of buffered messages (= number of Message Text
165 * Objects) and number of buffered characters 157 * Objects) and number of buffered characters
166 * for the SCCB currently used for buffering and at all 158 * for the SCCB currently used for buffering and at all
167 */ 159 */
168 buffer->mto_number++; 160 buffer->messages++;
169 buffer->mto_char_sum += str_len; 161 buffer->char_sum += buffer->current_length;
162
163 buffer->current_line = NULL;
164 buffer->current_length = 0;
165 buffer->current_msg = NULL;
170} 166}
171 167
172/* 168/*
@@ -218,7 +214,13 @@ sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count)
218 break; 214 break;
219 case '\a': /* bell, one for several times */ 215 case '\a': /* bell, one for several times */
220 /* set SCLP sound alarm bit in General Object */ 216 /* set SCLP sound alarm bit in General Object */
221 buffer->sccb->msg_buf.mdb.go.general_msg_flags |= 217 if (buffer->current_line == NULL) {
218 rc = sclp_initialize_mto(buffer,
219 buffer->columns);
220 if (rc)
221 return i_msg;
222 }
223 buffer->current_msg->mdb.go.general_msg_flags |=
222 GNRLMSGFLGS_SNDALRM; 224 GNRLMSGFLGS_SNDALRM;
223 break; 225 break;
224 case '\t': /* horizontal tabulator */ 226 case '\t': /* horizontal tabulator */
@@ -309,11 +311,13 @@ sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count)
309int 311int
310sclp_buffer_space(struct sclp_buffer *buffer) 312sclp_buffer_space(struct sclp_buffer *buffer)
311{ 313{
314 struct sccb_header *sccb;
312 int count; 315 int count;
313 316
314 count = MAX_SCCB_ROOM - buffer->sccb->header.length; 317 sccb = buffer->sccb;
318 count = MAX_SCCB_ROOM - sccb->length;
315 if (buffer->current_line != NULL) 319 if (buffer->current_line != NULL)
316 count -= sizeof(struct mto) + buffer->current_length; 320 count -= sizeof(struct msg_buf) + buffer->current_length;
317 return count; 321 return count;
318} 322}
319 323
@@ -325,7 +329,7 @@ sclp_chars_in_buffer(struct sclp_buffer *buffer)
325{ 329{
326 int count; 330 int count;
327 331
328 count = buffer->mto_char_sum; 332 count = buffer->char_sum;
329 if (buffer->current_line != NULL) 333 if (buffer->current_line != NULL)
330 count += buffer->current_length; 334 count += buffer->current_length;
331 return count; 335 return count;
@@ -378,7 +382,7 @@ sclp_writedata_callback(struct sclp_req *request, void *data)
378{ 382{
379 int rc; 383 int rc;
380 struct sclp_buffer *buffer; 384 struct sclp_buffer *buffer;
381 struct write_sccb *sccb; 385 struct sccb_header *sccb;
382 386
383 buffer = (struct sclp_buffer *) data; 387 buffer = (struct sclp_buffer *) data;
384 sccb = buffer->sccb; 388 sccb = buffer->sccb;
@@ -389,7 +393,7 @@ sclp_writedata_callback(struct sclp_req *request, void *data)
389 return; 393 return;
390 } 394 }
391 /* check SCLP response code and choose suitable action */ 395 /* check SCLP response code and choose suitable action */
392 switch (sccb->header.response_code) { 396 switch (sccb->response_code) {
393 case 0x0020 : 397 case 0x0020 :
394 /* Normal completion, buffer processed, message(s) sent */ 398 /* Normal completion, buffer processed, message(s) sent */
395 rc = 0; 399 rc = 0;
@@ -403,7 +407,7 @@ sclp_writedata_callback(struct sclp_req *request, void *data)
403 /* remove processed buffers and requeue rest */ 407 /* remove processed buffers and requeue rest */
404 if (sclp_remove_processed((struct sccb_header *) sccb) > 0) { 408 if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
405 /* not all buffers were processed */ 409 /* not all buffers were processed */
406 sccb->header.response_code = 0x0000; 410 sccb->response_code = 0x0000;
407 buffer->request.status = SCLP_REQ_FILLED; 411 buffer->request.status = SCLP_REQ_FILLED;
408 rc = sclp_add_request(request); 412 rc = sclp_add_request(request);
409 if (rc == 0) 413 if (rc == 0)
@@ -419,14 +423,14 @@ sclp_writedata_callback(struct sclp_req *request, void *data)
419 break; 423 break;
420 } 424 }
421 /* retry request */ 425 /* retry request */
422 sccb->header.response_code = 0x0000; 426 sccb->response_code = 0x0000;
423 buffer->request.status = SCLP_REQ_FILLED; 427 buffer->request.status = SCLP_REQ_FILLED;
424 rc = sclp_add_request(request); 428 rc = sclp_add_request(request);
425 if (rc == 0) 429 if (rc == 0)
426 return; 430 return;
427 break; 431 break;
428 default: 432 default:
429 if (sccb->header.response_code == 0x71f0) 433 if (sccb->response_code == 0x71f0)
430 rc = -ENOMEM; 434 rc = -ENOMEM;
431 else 435 else
432 rc = -EINVAL; 436 rc = -EINVAL;
@@ -445,25 +449,19 @@ int
445sclp_emit_buffer(struct sclp_buffer *buffer, 449sclp_emit_buffer(struct sclp_buffer *buffer,
446 void (*callback)(struct sclp_buffer *, int)) 450 void (*callback)(struct sclp_buffer *, int))
447{ 451{
448 struct write_sccb *sccb;
449
450 /* add current line if there is one */ 452 /* add current line if there is one */
451 if (buffer->current_line != NULL) 453 if (buffer->current_line != NULL)
452 sclp_finalize_mto(buffer); 454 sclp_finalize_mto(buffer);
453 455
454 /* Are there messages in the output buffer ? */ 456 /* Are there messages in the output buffer ? */
455 if (buffer->mto_number == 0) 457 if (buffer->messages == 0)
456 return -EIO; 458 return -EIO;
457 459
458 sccb = buffer->sccb;
459 /* Use normal write message */
460 sccb->msg_buf.header.type = EVTYP_MSG;
461
462 buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA; 460 buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
463 buffer->request.status = SCLP_REQ_FILLED; 461 buffer->request.status = SCLP_REQ_FILLED;
464 buffer->request.callback = sclp_writedata_callback; 462 buffer->request.callback = sclp_writedata_callback;
465 buffer->request.callback_data = buffer; 463 buffer->request.callback_data = buffer;
466 buffer->request.sccb = sccb; 464 buffer->request.sccb = buffer->sccb;
467 buffer->callback = callback; 465 buffer->callback = callback;
468 return sclp_add_request(&buffer->request); 466 return sclp_add_request(&buffer->request);
469} 467}
diff --git a/drivers/s390/char/sclp_rw.h b/drivers/s390/char/sclp_rw.h
index 7a7bfc947d97..e3b0290995ba 100644
--- a/drivers/s390/char/sclp_rw.h
+++ b/drivers/s390/char/sclp_rw.h
@@ -45,6 +45,7 @@ struct mdb_header {
45struct mdb { 45struct mdb {
46 struct mdb_header header; 46 struct mdb_header header;
47 struct go go; 47 struct go go;
48 struct mto mto;
48} __attribute__((packed)); 49} __attribute__((packed));
49 50
50struct msg_buf { 51struct msg_buf {
@@ -52,14 +53,9 @@ struct msg_buf {
52 struct mdb mdb; 53 struct mdb mdb;
53} __attribute__((packed)); 54} __attribute__((packed));
54 55
55struct write_sccb {
56 struct sccb_header header;
57 struct msg_buf msg_buf;
58} __attribute__((packed));
59
60/* The number of empty mto buffers that can be contained in a single sccb. */ 56/* The number of empty mto buffers that can be contained in a single sccb. */
61#define NR_EMPTY_MTO_PER_SCCB ((PAGE_SIZE - sizeof(struct sclp_buffer) - \ 57#define NR_EMPTY_MSG_PER_SCCB ((PAGE_SIZE - sizeof(struct sclp_buffer) - \
62 sizeof(struct write_sccb)) / sizeof(struct mto)) 58 sizeof(struct sccb_header)) / sizeof(struct msg_buf))
63 59
64/* 60/*
65 * data structure for information about list of SCCBs (only for writing), 61 * data structure for information about list of SCCBs (only for writing),
@@ -68,7 +64,8 @@ struct write_sccb {
68struct sclp_buffer { 64struct sclp_buffer {
69 struct list_head list; /* list_head for sccb_info chain */ 65 struct list_head list; /* list_head for sccb_info chain */
70 struct sclp_req request; 66 struct sclp_req request;
71 struct write_sccb *sccb; 67 void *sccb;
68 struct msg_buf *current_msg;
72 char *current_line; 69 char *current_line;
73 int current_length; 70 int current_length;
74 int retry_count; 71 int retry_count;
@@ -76,8 +73,8 @@ struct sclp_buffer {
76 unsigned short columns; 73 unsigned short columns;
77 unsigned short htab; 74 unsigned short htab;
78 /* statistics about this buffer */ 75 /* statistics about this buffer */
79 unsigned int mto_char_sum; /* # chars in sccb */ 76 unsigned int char_sum; /* # chars in sccb */
80 unsigned int mto_number; /* # mtos in sccb */ 77 unsigned int messages; /* # messages in sccb */
81 /* Callback that is called after reaching final status. */ 78 /* Callback that is called after reaching final status. */
82 void (*callback)(struct sclp_buffer *, int); 79 void (*callback)(struct sclp_buffer *, int);
83}; 80};
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 003663288e29..3c6e174e19b6 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -84,8 +84,8 @@ sclp_tty_close(struct tty_struct *tty, struct file *filp)
84 * to change as output buffers get emptied, or if the output flow 84 * to change as output buffers get emptied, or if the output flow
85 * control is acted. This is not an exact number because not every 85 * control is acted. This is not an exact number because not every
86 * character needs the same space in the sccb. The worst case is 86 * character needs the same space in the sccb. The worst case is
87 * a string of newlines. Every newlines creates a new mto which 87 * a string of newlines. Every newline creates a new message which
88 * needs 8 bytes. 88 * needs 82 bytes.
89 */ 89 */
90static int 90static int
91sclp_tty_write_room (struct tty_struct *tty) 91sclp_tty_write_room (struct tty_struct *tty)
@@ -97,9 +97,9 @@ sclp_tty_write_room (struct tty_struct *tty)
97 spin_lock_irqsave(&sclp_tty_lock, flags); 97 spin_lock_irqsave(&sclp_tty_lock, flags);
98 count = 0; 98 count = 0;
99 if (sclp_ttybuf != NULL) 99 if (sclp_ttybuf != NULL)
100 count = sclp_buffer_space(sclp_ttybuf) / sizeof(struct mto); 100 count = sclp_buffer_space(sclp_ttybuf) / sizeof(struct msg_buf);
101 list_for_each(l, &sclp_tty_pages) 101 list_for_each(l, &sclp_tty_pages)
102 count += NR_EMPTY_MTO_PER_SCCB; 102 count += NR_EMPTY_MSG_PER_SCCB;
103 spin_unlock_irqrestore(&sclp_tty_lock, flags); 103 spin_unlock_irqrestore(&sclp_tty_lock, flags);
104 return count; 104 return count;
105} 105}
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 07fc5d9e7f10..b5620e818d6b 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -476,26 +476,6 @@ static int cio_check_devno_blacklisted(struct subchannel *sch)
476 return 0; 476 return 0;
477} 477}
478 478
479static int cio_validate_io_subchannel(struct subchannel *sch)
480{
481 /* Initialization for io subchannels. */
482 if (!css_sch_is_valid(&sch->schib))
483 return -ENODEV;
484
485 /* Devno is valid. */
486 return cio_check_devno_blacklisted(sch);
487}
488
489static int cio_validate_msg_subchannel(struct subchannel *sch)
490{
491 /* Initialization for message subchannels. */
492 if (!css_sch_is_valid(&sch->schib))
493 return -ENODEV;
494
495 /* Devno is valid. */
496 return cio_check_devno_blacklisted(sch);
497}
498
499/** 479/**
500 * cio_validate_subchannel - basic validation of subchannel 480 * cio_validate_subchannel - basic validation of subchannel
501 * @sch: subchannel structure to be filled out 481 * @sch: subchannel structure to be filled out
@@ -533,10 +513,11 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
533 513
534 switch (sch->st) { 514 switch (sch->st) {
535 case SUBCHANNEL_TYPE_IO: 515 case SUBCHANNEL_TYPE_IO:
536 err = cio_validate_io_subchannel(sch);
537 break;
538 case SUBCHANNEL_TYPE_MSG: 516 case SUBCHANNEL_TYPE_MSG:
539 err = cio_validate_msg_subchannel(sch); 517 if (!css_sch_is_valid(&sch->schib))
518 err = -ENODEV;
519 else
520 err = cio_check_devno_blacklisted(sch);
540 break; 521 break;
541 default: 522 default:
542 err = 0; 523 err = 0;
@@ -826,11 +807,11 @@ static atomic_t chpid_reset_count;
826static void s390_reset_chpids_mcck_handler(void) 807static void s390_reset_chpids_mcck_handler(void)
827{ 808{
828 struct crw crw; 809 struct crw crw;
829 struct mci *mci; 810 union mci mci;
830 811
831 /* Check for pending channel report word. */ 812 /* Check for pending channel report word. */
832 mci = (struct mci *)&S390_lowcore.mcck_interruption_code; 813 mci.val = S390_lowcore.mcck_interruption_code;
833 if (!mci->cp) 814 if (!mci.cp)
834 return; 815 return;
835 /* Process channel report words. */ 816 /* Process channel report words. */
836 while (stcrw(&crw) == 0) { 817 while (stcrw(&crw) == 0) {
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 23054f8fa9fc..b2afad5a5682 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -113,7 +113,6 @@ module_param(format, bint, 0444);
113 * @readall: read a measurement block in a common format 113 * @readall: read a measurement block in a common format
114 * @reset: clear the data in the associated measurement block and 114 * @reset: clear the data in the associated measurement block and
115 * reset its time stamp 115 * reset its time stamp
116 * @align: align an allocated block so that the hardware can use it
117 */ 116 */
118struct cmb_operations { 117struct cmb_operations {
119 int (*alloc) (struct ccw_device *); 118 int (*alloc) (struct ccw_device *);
@@ -122,7 +121,6 @@ struct cmb_operations {
122 u64 (*read) (struct ccw_device *, int); 121 u64 (*read) (struct ccw_device *, int);
123 int (*readall)(struct ccw_device *, struct cmbdata *); 122 int (*readall)(struct ccw_device *, struct cmbdata *);
124 void (*reset) (struct ccw_device *); 123 void (*reset) (struct ccw_device *);
125 void *(*align) (void *);
126/* private: */ 124/* private: */
127 struct attribute_group *attr_group; 125 struct attribute_group *attr_group;
128}; 126};
@@ -186,9 +184,8 @@ static inline void cmf_activate(void *area, unsigned int onoff)
186static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc, 184static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
187 unsigned long address) 185 unsigned long address)
188{ 186{
189 struct subchannel *sch; 187 struct subchannel *sch = to_subchannel(cdev->dev.parent);
190 188 int ret;
191 sch = to_subchannel(cdev->dev.parent);
192 189
193 sch->config.mme = mme; 190 sch->config.mme = mme;
194 sch->config.mbfc = mbfc; 191 sch->config.mbfc = mbfc;
@@ -198,7 +195,15 @@ static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
198 else 195 else
199 sch->config.mbi = address; 196 sch->config.mbi = address;
200 197
201 return cio_commit_config(sch); 198 ret = cio_commit_config(sch);
199 if (!mme && ret == -ENODEV) {
200 /*
201 * The task was to disable measurement block updates but
202 * the subchannel is already gone. Report success.
203 */
204 ret = 0;
205 }
206 return ret;
202} 207}
203 208
204struct set_schib_struct { 209struct set_schib_struct {
@@ -314,7 +319,7 @@ static int cmf_copy_block(struct ccw_device *cdev)
314 return -EBUSY; 319 return -EBUSY;
315 } 320 }
316 cmb_data = cdev->private->cmb; 321 cmb_data = cdev->private->cmb;
317 hw_block = cmbops->align(cmb_data->hw_block); 322 hw_block = cmb_data->hw_block;
318 if (!memcmp(cmb_data->last_block, hw_block, cmb_data->size)) 323 if (!memcmp(cmb_data->last_block, hw_block, cmb_data->size))
319 /* No need to copy. */ 324 /* No need to copy. */
320 return 0; 325 return 0;
@@ -425,7 +430,7 @@ static void cmf_generic_reset(struct ccw_device *cdev)
425 * Need to reset hw block as well to make the hardware start 430 * Need to reset hw block as well to make the hardware start
426 * from 0 again. 431 * from 0 again.
427 */ 432 */
428 memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size); 433 memset(cmb_data->hw_block, 0, cmb_data->size);
429 cmb_data->last_update = 0; 434 cmb_data->last_update = 0;
430 } 435 }
431 cdev->private->cmb_start_time = get_tod_clock(); 436 cdev->private->cmb_start_time = get_tod_clock();
@@ -606,12 +611,6 @@ static void free_cmb(struct ccw_device *cdev)
606 spin_lock_irq(cdev->ccwlock); 611 spin_lock_irq(cdev->ccwlock);
607 612
608 priv = cdev->private; 613 priv = cdev->private;
609
610 if (list_empty(&priv->cmb_list)) {
611 /* already freed */
612 goto out;
613 }
614
615 cmb_data = priv->cmb; 614 cmb_data = priv->cmb;
616 priv->cmb = NULL; 615 priv->cmb = NULL;
617 if (cmb_data) 616 if (cmb_data)
@@ -626,7 +625,6 @@ static void free_cmb(struct ccw_device *cdev)
626 free_pages((unsigned long)cmb_area.mem, get_order(size)); 625 free_pages((unsigned long)cmb_area.mem, get_order(size));
627 cmb_area.mem = NULL; 626 cmb_area.mem = NULL;
628 } 627 }
629out:
630 spin_unlock_irq(cdev->ccwlock); 628 spin_unlock_irq(cdev->ccwlock);
631 spin_unlock(&cmb_area.lock); 629 spin_unlock(&cmb_area.lock);
632} 630}
@@ -755,11 +753,6 @@ static void reset_cmb(struct ccw_device *cdev)
755 cmf_generic_reset(cdev); 753 cmf_generic_reset(cdev);
756} 754}
757 755
758static void * align_cmb(void *area)
759{
760 return area;
761}
762
763static struct attribute_group cmf_attr_group; 756static struct attribute_group cmf_attr_group;
764 757
765static struct cmb_operations cmbops_basic = { 758static struct cmb_operations cmbops_basic = {
@@ -769,7 +762,6 @@ static struct cmb_operations cmbops_basic = {
769 .read = read_cmb, 762 .read = read_cmb,
770 .readall = readall_cmb, 763 .readall = readall_cmb,
771 .reset = reset_cmb, 764 .reset = reset_cmb,
772 .align = align_cmb,
773 .attr_group = &cmf_attr_group, 765 .attr_group = &cmf_attr_group,
774}; 766};
775 767
@@ -804,64 +796,57 @@ struct cmbe {
804 u32 device_busy_time; 796 u32 device_busy_time;
805 u32 initial_command_response_time; 797 u32 initial_command_response_time;
806 u32 reserved[7]; 798 u32 reserved[7];
807}; 799} __packed __aligned(64);
808 800
809/* 801static struct kmem_cache *cmbe_cache;
810 * kmalloc only guarantees 8 byte alignment, but we need cmbe
811 * pointers to be naturally aligned. Make sure to allocate
812 * enough space for two cmbes.
813 */
814static inline struct cmbe *cmbe_align(struct cmbe *c)
815{
816 unsigned long addr;
817 addr = ((unsigned long)c + sizeof (struct cmbe) - sizeof(long)) &
818 ~(sizeof (struct cmbe) - sizeof(long));
819 return (struct cmbe*)addr;
820}
821 802
822static int alloc_cmbe(struct ccw_device *cdev) 803static int alloc_cmbe(struct ccw_device *cdev)
823{ 804{
824 struct cmbe *cmbe;
825 struct cmb_data *cmb_data; 805 struct cmb_data *cmb_data;
826 int ret; 806 struct cmbe *cmbe;
807 int ret = -ENOMEM;
827 808
828 cmbe = kzalloc (sizeof (*cmbe) * 2, GFP_KERNEL); 809 cmbe = kmem_cache_zalloc(cmbe_cache, GFP_KERNEL);
829 if (!cmbe) 810 if (!cmbe)
830 return -ENOMEM; 811 return ret;
831 cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL); 812
832 if (!cmb_data) { 813 cmb_data = kzalloc(sizeof(*cmb_data), GFP_KERNEL);
833 ret = -ENOMEM; 814 if (!cmb_data)
834 goto out_free; 815 goto out_free;
835 } 816
836 cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL); 817 cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL);
837 if (!cmb_data->last_block) { 818 if (!cmb_data->last_block)
838 ret = -ENOMEM;
839 goto out_free; 819 goto out_free;
840 } 820
841 cmb_data->size = sizeof(struct cmbe); 821 cmb_data->size = sizeof(*cmbe);
842 spin_lock_irq(cdev->ccwlock);
843 if (cdev->private->cmb) {
844 spin_unlock_irq(cdev->ccwlock);
845 ret = -EBUSY;
846 goto out_free;
847 }
848 cmb_data->hw_block = cmbe; 822 cmb_data->hw_block = cmbe;
823
824 spin_lock(&cmb_area.lock);
825 spin_lock_irq(cdev->ccwlock);
826 if (cdev->private->cmb)
827 goto out_unlock;
828
849 cdev->private->cmb = cmb_data; 829 cdev->private->cmb = cmb_data;
850 spin_unlock_irq(cdev->ccwlock);
851 830
852 /* activate global measurement if this is the first channel */ 831 /* activate global measurement if this is the first channel */
853 spin_lock(&cmb_area.lock);
854 if (list_empty(&cmb_area.list)) 832 if (list_empty(&cmb_area.list))
855 cmf_activate(NULL, 1); 833 cmf_activate(NULL, 1);
856 list_add_tail(&cdev->private->cmb_list, &cmb_area.list); 834 list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
857 spin_unlock(&cmb_area.lock);
858 835
836 spin_unlock_irq(cdev->ccwlock);
837 spin_unlock(&cmb_area.lock);
859 return 0; 838 return 0;
839
840out_unlock:
841 spin_unlock_irq(cdev->ccwlock);
842 spin_unlock(&cmb_area.lock);
843 ret = -EBUSY;
860out_free: 844out_free:
861 if (cmb_data) 845 if (cmb_data)
862 kfree(cmb_data->last_block); 846 kfree(cmb_data->last_block);
863 kfree(cmb_data); 847 kfree(cmb_data);
864 kfree(cmbe); 848 kmem_cache_free(cmbe_cache, cmbe);
849
865 return ret; 850 return ret;
866} 851}
867 852
@@ -869,19 +854,21 @@ static void free_cmbe(struct ccw_device *cdev)
869{ 854{
870 struct cmb_data *cmb_data; 855 struct cmb_data *cmb_data;
871 856
857 spin_lock(&cmb_area.lock);
872 spin_lock_irq(cdev->ccwlock); 858 spin_lock_irq(cdev->ccwlock);
873 cmb_data = cdev->private->cmb; 859 cmb_data = cdev->private->cmb;
874 cdev->private->cmb = NULL; 860 cdev->private->cmb = NULL;
875 if (cmb_data) 861 if (cmb_data) {
876 kfree(cmb_data->last_block); 862 kfree(cmb_data->last_block);
863 kmem_cache_free(cmbe_cache, cmb_data->hw_block);
864 }
877 kfree(cmb_data); 865 kfree(cmb_data);
878 spin_unlock_irq(cdev->ccwlock);
879 866
880 /* deactivate global measurement if this is the last channel */ 867 /* deactivate global measurement if this is the last channel */
881 spin_lock(&cmb_area.lock);
882 list_del_init(&cdev->private->cmb_list); 868 list_del_init(&cdev->private->cmb_list);
883 if (list_empty(&cmb_area.list)) 869 if (list_empty(&cmb_area.list))
884 cmf_activate(NULL, 0); 870 cmf_activate(NULL, 0);
871 spin_unlock_irq(cdev->ccwlock);
885 spin_unlock(&cmb_area.lock); 872 spin_unlock(&cmb_area.lock);
886} 873}
887 874
@@ -897,7 +884,7 @@ static int set_cmbe(struct ccw_device *cdev, u32 mme)
897 return -EINVAL; 884 return -EINVAL;
898 } 885 }
899 cmb_data = cdev->private->cmb; 886 cmb_data = cdev->private->cmb;
900 mba = mme ? (unsigned long) cmbe_align(cmb_data->hw_block) : 0; 887 mba = mme ? (unsigned long) cmb_data->hw_block : 0;
901 spin_unlock_irqrestore(cdev->ccwlock, flags); 888 spin_unlock_irqrestore(cdev->ccwlock, flags);
902 889
903 return set_schib_wait(cdev, mme, 1, mba); 890 return set_schib_wait(cdev, mme, 1, mba);
@@ -1022,11 +1009,6 @@ static void reset_cmbe(struct ccw_device *cdev)
1022 cmf_generic_reset(cdev); 1009 cmf_generic_reset(cdev);
1023} 1010}
1024 1011
1025static void * align_cmbe(void *area)
1026{
1027 return cmbe_align(area);
1028}
1029
1030static struct attribute_group cmf_attr_group_ext; 1012static struct attribute_group cmf_attr_group_ext;
1031 1013
1032static struct cmb_operations cmbops_extended = { 1014static struct cmb_operations cmbops_extended = {
@@ -1036,7 +1018,6 @@ static struct cmb_operations cmbops_extended = {
1036 .read = read_cmbe, 1018 .read = read_cmbe,
1037 .readall = readall_cmbe, 1019 .readall = readall_cmbe,
1038 .reset = reset_cmbe, 1020 .reset = reset_cmbe,
1039 .align = align_cmbe,
1040 .attr_group = &cmf_attr_group_ext, 1021 .attr_group = &cmf_attr_group_ext,
1041}; 1022};
1042 1023
@@ -1171,23 +1152,28 @@ static ssize_t cmb_enable_show(struct device *dev,
1171 struct device_attribute *attr, 1152 struct device_attribute *attr,
1172 char *buf) 1153 char *buf)
1173{ 1154{
1174 return sprintf(buf, "%d\n", to_ccwdev(dev)->private->cmb ? 1 : 0); 1155 struct ccw_device *cdev = to_ccwdev(dev);
1156 int enabled;
1157
1158 spin_lock_irq(cdev->ccwlock);
1159 enabled = !!cdev->private->cmb;
1160 spin_unlock_irq(cdev->ccwlock);
1161
1162 return sprintf(buf, "%d\n", enabled);
1175} 1163}
1176 1164
1177static ssize_t cmb_enable_store(struct device *dev, 1165static ssize_t cmb_enable_store(struct device *dev,
1178 struct device_attribute *attr, const char *buf, 1166 struct device_attribute *attr, const char *buf,
1179 size_t c) 1167 size_t c)
1180{ 1168{
1181 struct ccw_device *cdev; 1169 struct ccw_device *cdev = to_ccwdev(dev);
1182 int ret;
1183 unsigned long val; 1170 unsigned long val;
1171 int ret;
1184 1172
1185 ret = kstrtoul(buf, 16, &val); 1173 ret = kstrtoul(buf, 16, &val);
1186 if (ret) 1174 if (ret)
1187 return ret; 1175 return ret;
1188 1176
1189 cdev = to_ccwdev(dev);
1190
1191 switch (val) { 1177 switch (val) {
1192 case 0: 1178 case 0:
1193 ret = disable_cmf(cdev); 1179 ret = disable_cmf(cdev);
@@ -1195,12 +1181,13 @@ static ssize_t cmb_enable_store(struct device *dev,
1195 case 1: 1181 case 1:
1196 ret = enable_cmf(cdev); 1182 ret = enable_cmf(cdev);
1197 break; 1183 break;
1184 default:
1185 ret = -EINVAL;
1198 } 1186 }
1199 1187
1200 return c; 1188 return ret ? ret : c;
1201} 1189}
1202 1190DEVICE_ATTR_RW(cmb_enable);
1203DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store);
1204 1191
1205int ccw_set_cmf(struct ccw_device *cdev, int enable) 1192int ccw_set_cmf(struct ccw_device *cdev, int enable)
1206{ 1193{
@@ -1220,41 +1207,71 @@ int enable_cmf(struct ccw_device *cdev)
1220{ 1207{
1221 int ret; 1208 int ret;
1222 1209
1210 device_lock(&cdev->dev);
1211 get_device(&cdev->dev);
1223 ret = cmbops->alloc(cdev); 1212 ret = cmbops->alloc(cdev);
1224 cmbops->reset(cdev);
1225 if (ret) 1213 if (ret)
1226 return ret; 1214 goto out;
1215 cmbops->reset(cdev);
1216 ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group);
1217 if (ret) {
1218 cmbops->free(cdev);
1219 goto out;
1220 }
1227 ret = cmbops->set(cdev, 2); 1221 ret = cmbops->set(cdev, 2);
1228 if (ret) { 1222 if (ret) {
1223 sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
1229 cmbops->free(cdev); 1224 cmbops->free(cdev);
1230 return ret;
1231 } 1225 }
1232 ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group); 1226out:
1233 if (!ret) 1227 if (ret)
1234 return 0; 1228 put_device(&cdev->dev);
1235 cmbops->set(cdev, 0); //FIXME: this can fail 1229
1236 cmbops->free(cdev); 1230 device_unlock(&cdev->dev);
1237 return ret; 1231 return ret;
1238} 1232}
1239 1233
1240/** 1234/**
1241 * disable_cmf() - switch off the channel measurement for a specific device 1235 * __disable_cmf() - switch off the channel measurement for a specific device
1242 * @cdev: The ccw device to be disabled 1236 * @cdev: The ccw device to be disabled
1243 * 1237 *
1244 * Returns %0 for success or a negative error value. 1238 * Returns %0 for success or a negative error value.
1245 * 1239 *
1246 * Context: 1240 * Context:
1247 * non-atomic 1241 * non-atomic, device_lock() held.
1248 */ 1242 */
1249int disable_cmf(struct ccw_device *cdev) 1243int __disable_cmf(struct ccw_device *cdev)
1250{ 1244{
1251 int ret; 1245 int ret;
1252 1246
1253 ret = cmbops->set(cdev, 0); 1247 ret = cmbops->set(cdev, 0);
1254 if (ret) 1248 if (ret)
1255 return ret; 1249 return ret;
1256 cmbops->free(cdev); 1250
1257 sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group); 1251 sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
1252 cmbops->free(cdev);
1253 put_device(&cdev->dev);
1254
1255 return ret;
1256}
1257
1258/**
1259 * disable_cmf() - switch off the channel measurement for a specific device
1260 * @cdev: The ccw device to be disabled
1261 *
1262 * Returns %0 for success or a negative error value.
1263 *
1264 * Context:
1265 * non-atomic
1266 */
1267int disable_cmf(struct ccw_device *cdev)
1268{
1269 int ret;
1270
1271 device_lock(&cdev->dev);
1272 ret = __disable_cmf(cdev);
1273 device_unlock(&cdev->dev);
1274
1258 return ret; 1275 return ret;
1259} 1276}
1260 1277
@@ -1295,10 +1312,32 @@ int cmf_reenable(struct ccw_device *cdev)
1295 return cmbops->set(cdev, 2); 1312 return cmbops->set(cdev, 2);
1296} 1313}
1297 1314
1315/**
1316 * cmf_reactivate() - reactivate measurement block updates
1317 *
1318 * Use this during resume from hibernate.
1319 */
1320void cmf_reactivate(void)
1321{
1322 spin_lock(&cmb_area.lock);
1323 if (!list_empty(&cmb_area.list))
1324 cmf_activate(cmb_area.mem, 1);
1325 spin_unlock(&cmb_area.lock);
1326}
1327
1328static int __init init_cmbe(void)
1329{
1330 cmbe_cache = kmem_cache_create("cmbe_cache", sizeof(struct cmbe),
1331 __alignof__(struct cmbe), 0, NULL);
1332
1333 return cmbe_cache ? 0 : -ENOMEM;
1334}
1335
1298static int __init init_cmf(void) 1336static int __init init_cmf(void)
1299{ 1337{
1300 char *format_string; 1338 char *format_string;
1301 char *detect_string = "parameter"; 1339 char *detect_string;
1340 int ret;
1302 1341
1303 /* 1342 /*
1304 * If the user did not give a parameter, see if we are running on a 1343 * If the user did not give a parameter, see if we are running on a
@@ -1324,15 +1363,18 @@ static int __init init_cmf(void)
1324 case CMF_EXTENDED: 1363 case CMF_EXTENDED:
1325 format_string = "extended"; 1364 format_string = "extended";
1326 cmbops = &cmbops_extended; 1365 cmbops = &cmbops_extended;
1366
1367 ret = init_cmbe();
1368 if (ret)
1369 return ret;
1327 break; 1370 break;
1328 default: 1371 default:
1329 return 1; 1372 return -EINVAL;
1330 } 1373 }
1331 pr_info("Channel measurement facility initialized using format " 1374 pr_info("Channel measurement facility initialized using format "
1332 "%s (mode %s)\n", format_string, detect_string); 1375 "%s (mode %s)\n", format_string, detect_string);
1333 return 0; 1376 return 0;
1334} 1377}
1335
1336module_init(init_cmf); 1378module_init(init_cmf);
1337 1379
1338 1380
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 0268e5fd59b5..2ee3053bdc12 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -44,7 +44,6 @@ for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
44 int ret; 44 int ret;
45 45
46 init_subchannel_id(&schid); 46 init_subchannel_id(&schid);
47 ret = -ENODEV;
48 do { 47 do {
49 do { 48 do {
50 ret = fn(schid, data); 49 ret = fn(schid, data);
@@ -1089,6 +1088,7 @@ void channel_subsystem_reinit(void)
1089 if (chp) 1088 if (chp)
1090 chp_update_desc(chp); 1089 chp_update_desc(chp);
1091 } 1090 }
1091 cmf_reactivate();
1092} 1092}
1093 1093
1094#ifdef CONFIG_PROC_FS 1094#ifdef CONFIG_PROC_FS
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index dfef5e63cb7b..6aae68412802 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1787,6 +1787,8 @@ static int ccw_device_remove(struct device *dev)
1787 cdev->drv = NULL; 1787 cdev->drv = NULL;
1788 cdev->private->int_class = IRQIO_CIO; 1788 cdev->private->int_class = IRQIO_CIO;
1789 spin_unlock_irq(cdev->ccwlock); 1789 spin_unlock_irq(cdev->ccwlock);
1790 __disable_cmf(cdev);
1791
1790 return 0; 1792 return 0;
1791} 1793}
1792 1794
@@ -1797,7 +1799,7 @@ static void ccw_device_shutdown(struct device *dev)
1797 cdev = to_ccwdev(dev); 1799 cdev = to_ccwdev(dev);
1798 if (cdev->drv && cdev->drv->shutdown) 1800 if (cdev->drv && cdev->drv->shutdown)
1799 cdev->drv->shutdown(cdev); 1801 cdev->drv->shutdown(cdev);
1800 disable_cmf(cdev); 1802 __disable_cmf(cdev);
1801} 1803}
1802 1804
1803static int ccw_device_pm_prepare(struct device *dev) 1805static int ccw_device_pm_prepare(struct device *dev)
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 8d1d29873172..065b1be98e2c 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -125,11 +125,6 @@ void ccw_device_verify_done(struct ccw_device *, int);
125void ccw_device_disband_start(struct ccw_device *); 125void ccw_device_disband_start(struct ccw_device *);
126void ccw_device_disband_done(struct ccw_device *, int); 126void ccw_device_disband_done(struct ccw_device *, int);
127 127
128void ccw_device_stlck_start(struct ccw_device *, void *, void *, void *);
129void ccw_device_stlck_done(struct ccw_device *, void *, int);
130
131int ccw_device_call_handler(struct ccw_device *);
132
133int ccw_device_stlck(struct ccw_device *); 128int ccw_device_stlck(struct ccw_device *);
134 129
135/* Helper function for machine check handling. */ 130/* Helper function for machine check handling. */
@@ -145,6 +140,7 @@ void ccw_device_set_timeout(struct ccw_device *, int);
145void retry_set_schib(struct ccw_device *cdev); 140void retry_set_schib(struct ccw_device *cdev);
146void cmf_retry_copy_block(struct ccw_device *); 141void cmf_retry_copy_block(struct ccw_device *);
147int cmf_reenable(struct ccw_device *); 142int cmf_reenable(struct ccw_device *);
143void cmf_reactivate(void);
148int ccw_set_cmf(struct ccw_device *cdev, int enable); 144int ccw_set_cmf(struct ccw_device *cdev, int enable);
149extern struct device_attribute dev_attr_cmb_enable; 145extern struct device_attribute dev_attr_cmb_enable;
150#endif 146#endif
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 83da53c8e54c..92e03b42e661 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -731,6 +731,44 @@ static void ccw_device_boxed_verify(struct ccw_device *cdev,
731} 731}
732 732
733/* 733/*
734 * Pass interrupt to device driver.
735 */
736static int ccw_device_call_handler(struct ccw_device *cdev)
737{
738 unsigned int stctl;
739 int ending_status;
740
741 /*
742 * we allow for the device action handler if .
743 * - we received ending status
744 * - the action handler requested to see all interrupts
745 * - we received an intermediate status
746 * - fast notification was requested (primary status)
747 * - unsolicited interrupts
748 */
749 stctl = scsw_stctl(&cdev->private->irb.scsw);
750 ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
751 (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
752 (stctl == SCSW_STCTL_STATUS_PEND);
753 if (!ending_status &&
754 !cdev->private->options.repall &&
755 !(stctl & SCSW_STCTL_INTER_STATUS) &&
756 !(cdev->private->options.fast &&
757 (stctl & SCSW_STCTL_PRIM_STATUS)))
758 return 0;
759
760 if (ending_status)
761 ccw_device_set_timeout(cdev, 0);
762
763 if (cdev->handler)
764 cdev->handler(cdev, cdev->private->intparm,
765 &cdev->private->irb);
766
767 memset(&cdev->private->irb, 0, sizeof(struct irb));
768 return 1;
769}
770
771/*
734 * Got an interrupt for a normal io (state online). 772 * Got an interrupt for a normal io (state online).
735 */ 773 */
736static void 774static void
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 6acd0b577694..a69f702a2fcc 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -412,52 +412,6 @@ int ccw_device_resume(struct ccw_device *cdev)
412 return cio_resume(sch); 412 return cio_resume(sch);
413} 413}
414 414
415/*
416 * Pass interrupt to device driver.
417 */
418int
419ccw_device_call_handler(struct ccw_device *cdev)
420{
421 unsigned int stctl;
422 int ending_status;
423
424 /*
425 * we allow for the device action handler if .
426 * - we received ending status
427 * - the action handler requested to see all interrupts
428 * - we received an intermediate status
429 * - fast notification was requested (primary status)
430 * - unsolicited interrupts
431 */
432 stctl = scsw_stctl(&cdev->private->irb.scsw);
433 ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
434 (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
435 (stctl == SCSW_STCTL_STATUS_PEND);
436 if (!ending_status &&
437 !cdev->private->options.repall &&
438 !(stctl & SCSW_STCTL_INTER_STATUS) &&
439 !(cdev->private->options.fast &&
440 (stctl & SCSW_STCTL_PRIM_STATUS)))
441 return 0;
442
443 /* Clear pending timers for device driver initiated I/O. */
444 if (ending_status)
445 ccw_device_set_timeout(cdev, 0);
446 /*
447 * Now we are ready to call the device driver interrupt handler.
448 */
449 if (cdev->handler)
450 cdev->handler(cdev, cdev->private->intparm,
451 &cdev->private->irb);
452
453 /*
454 * Clear the old and now useless interrupt response block.
455 */
456 memset(&cdev->private->irb, 0, sizeof(struct irb));
457
458 return 1;
459}
460
461/** 415/**
462 * ccw_device_get_ciw() - Search for CIW command in extended sense data. 416 * ccw_device_get_ciw() - Search for CIW command in extended sense data.
463 * @cdev: ccw device to inspect 417 * @cdev: ccw device to inspect
@@ -502,67 +456,6 @@ __u8 ccw_device_get_path_mask(struct ccw_device *cdev)
502 return sch->lpm; 456 return sch->lpm;
503} 457}
504 458
505struct stlck_data {
506 struct completion done;
507 int rc;
508};
509
510void ccw_device_stlck_done(struct ccw_device *cdev, void *data, int rc)
511{
512 struct stlck_data *sdata = data;
513
514 sdata->rc = rc;
515 complete(&sdata->done);
516}
517
518/*
519 * Perform unconditional reserve + release.
520 */
521int ccw_device_stlck(struct ccw_device *cdev)
522{
523 struct subchannel *sch = to_subchannel(cdev->dev.parent);
524 struct stlck_data data;
525 u8 *buffer;
526 int rc;
527
528 /* Check if steal lock operation is valid for this device. */
529 if (cdev->drv) {
530 if (!cdev->private->options.force)
531 return -EINVAL;
532 }
533 buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
534 if (!buffer)
535 return -ENOMEM;
536 init_completion(&data.done);
537 data.rc = -EIO;
538 spin_lock_irq(sch->lock);
539 rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
540 if (rc)
541 goto out_unlock;
542 /* Perform operation. */
543 cdev->private->state = DEV_STATE_STEAL_LOCK;
544 ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
545 spin_unlock_irq(sch->lock);
546 /* Wait for operation to finish. */
547 if (wait_for_completion_interruptible(&data.done)) {
548 /* Got a signal. */
549 spin_lock_irq(sch->lock);
550 ccw_request_cancel(cdev);
551 spin_unlock_irq(sch->lock);
552 wait_for_completion(&data.done);
553 }
554 rc = data.rc;
555 /* Check results. */
556 spin_lock_irq(sch->lock);
557 cio_disable_subchannel(sch);
558 cdev->private->state = DEV_STATE_BOXED;
559out_unlock:
560 spin_unlock_irq(sch->lock);
561 kfree(buffer);
562
563 return rc;
564}
565
566/** 459/**
567 * chp_get_chp_desc - return newly allocated channel-path descriptor 460 * chp_get_chp_desc - return newly allocated channel-path descriptor
568 * @cdev: device to obtain the descriptor for 461 * @cdev: device to obtain the descriptor for
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 37ada05e82a5..da246b67edfe 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -9,9 +9,10 @@
9 9
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/string.h> 11#include <linux/string.h>
12#include <linux/bitops.h>
12#include <linux/types.h> 13#include <linux/types.h>
13#include <linux/errno.h> 14#include <linux/errno.h>
14#include <linux/bitops.h> 15#include <linux/slab.h>
15#include <asm/ccwdev.h> 16#include <asm/ccwdev.h>
16#include <asm/cio.h> 17#include <asm/cio.h>
17 18
@@ -133,7 +134,7 @@ static void spid_build_cp(struct ccw_device *cdev, u8 fn)
133{ 134{
134 struct ccw_request *req = &cdev->private->req; 135 struct ccw_request *req = &cdev->private->req;
135 struct ccw1 *cp = cdev->private->iccws; 136 struct ccw1 *cp = cdev->private->iccws;
136 int i = 8 - ffs(req->lpm); 137 int i = pathmask_to_pos(req->lpm);
137 struct pgid *pgid = &cdev->private->pgid[i]; 138 struct pgid *pgid = &cdev->private->pgid[i];
138 139
139 pgid->inf.fc = fn; 140 pgid->inf.fc = fn;
@@ -434,7 +435,7 @@ static void snid_build_cp(struct ccw_device *cdev)
434{ 435{
435 struct ccw_request *req = &cdev->private->req; 436 struct ccw_request *req = &cdev->private->req;
436 struct ccw1 *cp = cdev->private->iccws; 437 struct ccw1 *cp = cdev->private->iccws;
437 int i = 8 - ffs(req->lpm); 438 int i = pathmask_to_pos(req->lpm);
438 439
439 /* Channel program setup. */ 440 /* Channel program setup. */
440 cp->cmd_code = CCW_CMD_SENSE_PGID; 441 cp->cmd_code = CCW_CMD_SENSE_PGID;
@@ -616,6 +617,11 @@ void ccw_device_disband_start(struct ccw_device *cdev)
616 ccw_request_start(cdev); 617 ccw_request_start(cdev);
617} 618}
618 619
620struct stlck_data {
621 struct completion done;
622 int rc;
623};
624
619static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2) 625static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
620{ 626{
621 struct ccw_request *req = &cdev->private->req; 627 struct ccw_request *req = &cdev->private->req;
@@ -634,7 +640,10 @@ static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
634 640
635static void stlck_callback(struct ccw_device *cdev, void *data, int rc) 641static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
636{ 642{
637 ccw_device_stlck_done(cdev, data, rc); 643 struct stlck_data *sdata = data;
644
645 sdata->rc = rc;
646 complete(&sdata->done);
638} 647}
639 648
640/** 649/**
@@ -645,11 +654,9 @@ static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
645 * @buf2: data pointer used in channel program 654 * @buf2: data pointer used in channel program
646 * 655 *
647 * Execute a channel program on @cdev to release an existing PGID reservation. 656 * Execute a channel program on @cdev to release an existing PGID reservation.
648 * When finished, call ccw_device_stlck_done with a return code specifying the
649 * result.
650 */ 657 */
651void ccw_device_stlck_start(struct ccw_device *cdev, void *data, void *buf1, 658static void ccw_device_stlck_start(struct ccw_device *cdev, void *data,
652 void *buf2) 659 void *buf1, void *buf2)
653{ 660{
654 struct subchannel *sch = to_subchannel(cdev->dev.parent); 661 struct subchannel *sch = to_subchannel(cdev->dev.parent);
655 struct ccw_request *req = &cdev->private->req; 662 struct ccw_request *req = &cdev->private->req;
@@ -667,3 +674,50 @@ void ccw_device_stlck_start(struct ccw_device *cdev, void *data, void *buf1,
667 ccw_request_start(cdev); 674 ccw_request_start(cdev);
668} 675}
669 676
677/*
678 * Perform unconditional reserve + release.
679 */
680int ccw_device_stlck(struct ccw_device *cdev)
681{
682 struct subchannel *sch = to_subchannel(cdev->dev.parent);
683 struct stlck_data data;
684 u8 *buffer;
685 int rc;
686
687 /* Check if steal lock operation is valid for this device. */
688 if (cdev->drv) {
689 if (!cdev->private->options.force)
690 return -EINVAL;
691 }
692 buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
693 if (!buffer)
694 return -ENOMEM;
695 init_completion(&data.done);
696 data.rc = -EIO;
697 spin_lock_irq(sch->lock);
698 rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
699 if (rc)
700 goto out_unlock;
701 /* Perform operation. */
702 cdev->private->state = DEV_STATE_STEAL_LOCK;
703 ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
704 spin_unlock_irq(sch->lock);
705 /* Wait for operation to finish. */
706 if (wait_for_completion_interruptible(&data.done)) {
707 /* Got a signal. */
708 spin_lock_irq(sch->lock);
709 ccw_request_cancel(cdev);
710 spin_unlock_irq(sch->lock);
711 wait_for_completion(&data.done);
712 }
713 rc = data.rc;
714 /* Check results. */
715 spin_lock_irq(sch->lock);
716 cio_disable_subchannel(sch);
717 cdev->private->state = DEV_STATE_BOXED;
718out_unlock:
719 spin_unlock_irq(sch->lock);
720 kfree(buffer);
721
722 return rc;
723}
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 771faf7094d6..57f710b3c8a4 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -3,6 +3,6 @@
3# 3#
4 4
5ap-objs := ap_bus.o 5ap-objs := ap_bus.o
6obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcicc.o zcrypt_pcixcc.o 6obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcixcc.o
7obj-$(CONFIG_ZCRYPT) += zcrypt_pcica.o zcrypt_cex2a.o zcrypt_cex4.o 7obj-$(CONFIG_ZCRYPT) += zcrypt_cex2a.o zcrypt_cex4.o
8obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o 8obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index d78b3d629d78..9cb3dfbcaddb 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -37,6 +37,7 @@
37#include <linux/notifier.h> 37#include <linux/notifier.h>
38#include <linux/kthread.h> 38#include <linux/kthread.h>
39#include <linux/mutex.h> 39#include <linux/mutex.h>
40#include <linux/suspend.h>
40#include <asm/reset.h> 41#include <asm/reset.h>
41#include <asm/airq.h> 42#include <asm/airq.h>
42#include <linux/atomic.h> 43#include <linux/atomic.h>
@@ -48,23 +49,6 @@
48 49
49#include "ap_bus.h" 50#include "ap_bus.h"
50 51
51/* Some prototypes. */
52static void ap_scan_bus(struct work_struct *);
53static void ap_poll_all(unsigned long);
54static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
55static int ap_poll_thread_start(void);
56static void ap_poll_thread_stop(void);
57static void ap_request_timeout(unsigned long);
58static inline void ap_schedule_poll_timer(void);
59static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
60static int ap_device_remove(struct device *dev);
61static int ap_device_probe(struct device *dev);
62static void ap_interrupt_handler(struct airq_struct *airq);
63static void ap_reset(struct ap_device *ap_dev, unsigned long *flags);
64static void ap_config_timeout(unsigned long ptr);
65static int ap_select_domain(void);
66static void ap_query_configuration(void);
67
68/* 52/*
69 * Module description. 53 * Module description.
70 */ 54 */
@@ -92,17 +76,18 @@ static DEFINE_SPINLOCK(ap_device_list_lock);
92static LIST_HEAD(ap_device_list); 76static LIST_HEAD(ap_device_list);
93 77
94/* 78/*
95 * Workqueue & timer for bus rescan. 79 * Workqueue timer for bus rescan.
96 */ 80 */
97static struct workqueue_struct *ap_work_queue;
98static struct timer_list ap_config_timer; 81static struct timer_list ap_config_timer;
99static int ap_config_time = AP_CONFIG_TIME; 82static int ap_config_time = AP_CONFIG_TIME;
100static DECLARE_WORK(ap_config_work, ap_scan_bus); 83static void ap_scan_bus(struct work_struct *);
84static DECLARE_WORK(ap_scan_work, ap_scan_bus);
101 85
102/* 86/*
103 * Tasklet & timer for AP request polling and interrupts 87 * Tasklet & timer for AP request polling and interrupts
104 */ 88 */
105static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0); 89static void ap_tasklet_fn(unsigned long);
90static DECLARE_TASKLET(ap_tasklet, ap_tasklet_fn, 0);
106static atomic_t ap_poll_requests = ATOMIC_INIT(0); 91static atomic_t ap_poll_requests = ATOMIC_INIT(0);
107static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); 92static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
108static struct task_struct *ap_poll_kthread = NULL; 93static struct task_struct *ap_poll_kthread = NULL;
@@ -115,6 +100,8 @@ static unsigned long long poll_timeout = 250000;
115 100
116/* Suspend flag */ 101/* Suspend flag */
117static int ap_suspend_flag; 102static int ap_suspend_flag;
103/* Maximum domain id */
104static int ap_max_domain_id;
118/* Flag to check if domain was set through module parameter domain=. This is 105/* Flag to check if domain was set through module parameter domain=. This is
119 * important when supsend and resume is done in a z/VM environment where the 106 * important when supsend and resume is done in a z/VM environment where the
120 * domain might change. */ 107 * domain might change. */
@@ -122,6 +109,8 @@ static int user_set_domain = 0;
122static struct bus_type ap_bus_type; 109static struct bus_type ap_bus_type;
123 110
124/* Adapter interrupt definitions */ 111/* Adapter interrupt definitions */
112static void ap_interrupt_handler(struct airq_struct *airq);
113
125static int ap_airq_flag; 114static int ap_airq_flag;
126 115
127static struct airq_struct ap_airq = { 116static struct airq_struct ap_airq = {
@@ -182,44 +171,27 @@ static int ap_configuration_available(void)
182/** 171/**
183 * ap_test_queue(): Test adjunct processor queue. 172 * ap_test_queue(): Test adjunct processor queue.
184 * @qid: The AP queue number 173 * @qid: The AP queue number
185 * @queue_depth: Pointer to queue depth value 174 * @info: Pointer to queue descriptor
186 * @device_type: Pointer to device type value
187 * 175 *
188 * Returns AP queue status structure. 176 * Returns AP queue status structure.
189 */ 177 */
190static inline struct ap_queue_status 178static inline struct ap_queue_status
191ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type) 179ap_test_queue(ap_qid_t qid, unsigned long *info)
192{ 180{
193 register unsigned long reg0 asm ("0") = qid; 181 register unsigned long reg0 asm ("0") = qid;
194 register struct ap_queue_status reg1 asm ("1"); 182 register struct ap_queue_status reg1 asm ("1");
195 register unsigned long reg2 asm ("2") = 0UL; 183 register unsigned long reg2 asm ("2") = 0UL;
196 184
185 if (test_facility(15))
186 reg0 |= 1UL << 23; /* set APFT T bit*/
197 asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */ 187 asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
198 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); 188 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
199 *device_type = (int) (reg2 >> 24); 189 if (info)
200 *queue_depth = (int) (reg2 & 0xff); 190 *info = reg2;
201 return reg1; 191 return reg1;
202} 192}
203 193
204/** 194/**
205 * ap_query_facilities(): PQAP(TAPQ) query facilities.
206 * @qid: The AP queue number
207 *
208 * Returns content of general register 2 after the PQAP(TAPQ)
209 * instruction was called.
210 */
211static inline unsigned long ap_query_facilities(ap_qid_t qid)
212{
213 register unsigned long reg0 asm ("0") = qid | 0x00800000UL;
214 register unsigned long reg1 asm ("1");
215 register unsigned long reg2 asm ("2") = 0UL;
216
217 asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
218 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
219 return reg2;
220}
221
222/**
223 * ap_reset_queue(): Reset adjunct processor queue. 195 * ap_reset_queue(): Reset adjunct processor queue.
224 * @qid: The AP queue number 196 * @qid: The AP queue number
225 * 197 *
@@ -259,31 +231,19 @@ ap_queue_interruption_control(ap_qid_t qid, void *ind)
259 return reg1_out; 231 return reg1_out;
260} 232}
261 233
262static inline struct ap_queue_status 234/**
263__ap_query_functions(ap_qid_t qid, unsigned int *functions) 235 * ap_query_configuration(): Get AP configuration data
264{ 236 *
265 register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23); 237 * Returns 0 on success, or -EOPNOTSUPP.
266 register struct ap_queue_status reg1 asm ("1") = AP_QUEUE_STATUS_INVALID; 238 */
267 register unsigned long reg2 asm ("2"); 239static inline int ap_query_configuration(void)
268
269 asm volatile(
270 ".long 0xb2af0000\n" /* PQAP(TAPQ) */
271 "0:\n"
272 EX_TABLE(0b, 0b)
273 : "+d" (reg0), "+d" (reg1), "=d" (reg2)
274 :
275 : "cc");
276
277 *functions = (unsigned int)(reg2 >> 32);
278 return reg1;
279}
280
281static inline int __ap_query_configuration(struct ap_config_info *config)
282{ 240{
283 register unsigned long reg0 asm ("0") = 0x04000000UL; 241 register unsigned long reg0 asm ("0") = 0x04000000UL;
284 register unsigned long reg1 asm ("1") = -EINVAL; 242 register unsigned long reg1 asm ("1") = -EINVAL;
285 register unsigned char *reg2 asm ("2") = (unsigned char *)config; 243 register void *reg2 asm ("2") = (void *) ap_configuration;
286 244
245 if (!ap_configuration)
246 return -EOPNOTSUPP;
287 asm volatile( 247 asm volatile(
288 ".long 0xb2af0000\n" /* PQAP(QCI) */ 248 ".long 0xb2af0000\n" /* PQAP(QCI) */
289 "0: la %1,0\n" 249 "0: la %1,0\n"
@@ -297,39 +257,60 @@ static inline int __ap_query_configuration(struct ap_config_info *config)
297} 257}
298 258
299/** 259/**
300 * ap_query_functions(): Query supported functions. 260 * ap_init_configuration(): Allocate and query configuration array.
301 * @qid: The AP queue number
302 * @functions: Pointer to functions field.
303 *
304 * Returns
305 * 0 on success.
306 * -ENODEV if queue not valid.
307 * -EBUSY if device busy.
308 * -EINVAL if query function is not supported
309 */ 261 */
310static int ap_query_functions(ap_qid_t qid, unsigned int *functions) 262static void ap_init_configuration(void)
311{ 263{
312 struct ap_queue_status status; 264 if (!ap_configuration_available())
265 return;
313 266
314 status = __ap_query_functions(qid, functions); 267 ap_configuration = kzalloc(sizeof(*ap_configuration), GFP_KERNEL);
268 if (!ap_configuration)
269 return;
270 if (ap_query_configuration() != 0) {
271 kfree(ap_configuration);
272 ap_configuration = NULL;
273 return;
274 }
275}
315 276
316 if (ap_queue_status_invalid_test(&status)) 277/*
317 return -ENODEV; 278 * ap_test_config(): helper function to extract the nrth bit
279 * within the unsigned int array field.
280 */
281static inline int ap_test_config(unsigned int *field, unsigned int nr)
282{
283 return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
284}
318 285
319 switch (status.response_code) { 286/*
320 case AP_RESPONSE_NORMAL: 287 * ap_test_config_card_id(): Test, whether an AP card ID is configured.
321 return 0; 288 * @id AP card ID
322 case AP_RESPONSE_Q_NOT_AVAIL: 289 *
323 case AP_RESPONSE_DECONFIGURED: 290 * Returns 0 if the card is not configured
324 case AP_RESPONSE_CHECKSTOPPED: 291 * 1 if the card is configured or
325 case AP_RESPONSE_INVALID_ADDRESS: 292 * if the configuration information is not available
326 return -ENODEV; 293 */
327 case AP_RESPONSE_RESET_IN_PROGRESS: 294static inline int ap_test_config_card_id(unsigned int id)
328 case AP_RESPONSE_BUSY: 295{
329 case AP_RESPONSE_OTHERWISE_CHANGED: 296 if (!ap_configuration) /* QCI not supported */
330 default: 297 return 1;
331 return -EBUSY; 298 return ap_test_config(ap_configuration->apm, id);
332 } 299}
300
301/*
302 * ap_test_config_domain(): Test, whether an AP usage domain is configured.
303 * @domain AP usage domain ID
304 *
305 * Returns 0 if the usage domain is not configured
306 * 1 if the usage domain is configured or
307 * if the configuration information is not available
308 */
309static inline int ap_test_config_domain(unsigned int domain)
310{
311 if (!ap_configuration) /* QCI not supported */
312 return domain < 16;
313 return ap_test_config(ap_configuration->aqm, domain);
333} 314}
334 315
335/** 316/**
@@ -354,7 +335,9 @@ static int ap_queue_enable_interruption(struct ap_device *ap_dev, void *ind)
354 case AP_RESPONSE_DECONFIGURED: 335 case AP_RESPONSE_DECONFIGURED:
355 case AP_RESPONSE_CHECKSTOPPED: 336 case AP_RESPONSE_CHECKSTOPPED:
356 case AP_RESPONSE_INVALID_ADDRESS: 337 case AP_RESPONSE_INVALID_ADDRESS:
357 return -ENODEV; 338 pr_err("Registering adapter interrupts for AP %d failed\n",
339 AP_QID_DEVICE(ap_dev->qid));
340 return -EOPNOTSUPP;
358 case AP_RESPONSE_RESET_IN_PROGRESS: 341 case AP_RESPONSE_RESET_IN_PROGRESS:
359 case AP_RESPONSE_BUSY: 342 case AP_RESPONSE_BUSY:
360 default: 343 default:
@@ -480,136 +463,559 @@ int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
480EXPORT_SYMBOL(ap_recv); 463EXPORT_SYMBOL(ap_recv);
481 464
482/** 465/**
483 * __ap_schedule_poll_timer(): Schedule poll timer. 466 * ap_query_queue(): Check if an AP queue is available.
484 * 467 * @qid: The AP queue number
485 * Set up the timer to run the poll tasklet 468 * @queue_depth: Pointer to queue depth value
469 * @device_type: Pointer to device type value
470 * @facilities: Pointer to facility indicator
486 */ 471 */
487static inline void __ap_schedule_poll_timer(void) 472static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
473 unsigned int *facilities)
474{
475 struct ap_queue_status status;
476 unsigned long info;
477 int nd;
478
479 if (!ap_test_config_card_id(AP_QID_DEVICE(qid)))
480 return -ENODEV;
481
482 status = ap_test_queue(qid, &info);
483 switch (status.response_code) {
484 case AP_RESPONSE_NORMAL:
485 *queue_depth = (int)(info & 0xff);
486 *device_type = (int)((info >> 24) & 0xff);
487 *facilities = (unsigned int)(info >> 32);
488 /* Update maximum domain id */
489 nd = (info >> 16) & 0xff;
490 if ((info & (1UL << 57)) && nd > 0)
491 ap_max_domain_id = nd;
492 return 0;
493 case AP_RESPONSE_Q_NOT_AVAIL:
494 case AP_RESPONSE_DECONFIGURED:
495 case AP_RESPONSE_CHECKSTOPPED:
496 case AP_RESPONSE_INVALID_ADDRESS:
497 return -ENODEV;
498 case AP_RESPONSE_RESET_IN_PROGRESS:
499 case AP_RESPONSE_OTHERWISE_CHANGED:
500 case AP_RESPONSE_BUSY:
501 return -EBUSY;
502 default:
503 BUG();
504 }
505}
506
507/* State machine definitions and helpers */
508
509static void ap_sm_wait(enum ap_wait wait)
488{ 510{
489 ktime_t hr_time; 511 ktime_t hr_time;
490 512
491 spin_lock_bh(&ap_poll_timer_lock); 513 switch (wait) {
492 if (!hrtimer_is_queued(&ap_poll_timer) && !ap_suspend_flag) { 514 case AP_WAIT_AGAIN:
493 hr_time = ktime_set(0, poll_timeout); 515 case AP_WAIT_INTERRUPT:
494 hrtimer_forward_now(&ap_poll_timer, hr_time); 516 if (ap_using_interrupts())
495 hrtimer_restart(&ap_poll_timer); 517 break;
518 if (ap_poll_kthread) {
519 wake_up(&ap_poll_wait);
520 break;
521 }
522 /* Fall through */
523 case AP_WAIT_TIMEOUT:
524 spin_lock_bh(&ap_poll_timer_lock);
525 if (!hrtimer_is_queued(&ap_poll_timer)) {
526 hr_time = ktime_set(0, poll_timeout);
527 hrtimer_forward_now(&ap_poll_timer, hr_time);
528 hrtimer_restart(&ap_poll_timer);
529 }
530 spin_unlock_bh(&ap_poll_timer_lock);
531 break;
532 case AP_WAIT_NONE:
533 default:
534 break;
496 } 535 }
497 spin_unlock_bh(&ap_poll_timer_lock); 536}
537
538static enum ap_wait ap_sm_nop(struct ap_device *ap_dev)
539{
540 return AP_WAIT_NONE;
498} 541}
499 542
500/** 543/**
501 * ap_schedule_poll_timer(): Schedule poll timer. 544 * ap_sm_recv(): Receive pending reply messages from an AP device but do
545 * not change the state of the device.
546 * @ap_dev: pointer to the AP device
502 * 547 *
503 * Set up the timer to run the poll tasklet 548 * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
504 */ 549 */
505static inline void ap_schedule_poll_timer(void) 550static struct ap_queue_status ap_sm_recv(struct ap_device *ap_dev)
506{ 551{
507 if (ap_using_interrupts()) 552 struct ap_queue_status status;
508 return; 553 struct ap_message *ap_msg;
509 __ap_schedule_poll_timer(); 554
555 status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
556 ap_dev->reply->message, ap_dev->reply->length);
557 switch (status.response_code) {
558 case AP_RESPONSE_NORMAL:
559 atomic_dec(&ap_poll_requests);
560 ap_dev->queue_count--;
561 if (ap_dev->queue_count > 0)
562 mod_timer(&ap_dev->timeout,
563 jiffies + ap_dev->drv->request_timeout);
564 list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
565 if (ap_msg->psmid != ap_dev->reply->psmid)
566 continue;
567 list_del_init(&ap_msg->list);
568 ap_dev->pendingq_count--;
569 ap_msg->receive(ap_dev, ap_msg, ap_dev->reply);
570 break;
571 }
572 case AP_RESPONSE_NO_PENDING_REPLY:
573 if (!status.queue_empty || ap_dev->queue_count <= 0)
574 break;
575 /* The card shouldn't forget requests but who knows. */
576 atomic_sub(ap_dev->queue_count, &ap_poll_requests);
577 ap_dev->queue_count = 0;
578 list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
579 ap_dev->requestq_count += ap_dev->pendingq_count;
580 ap_dev->pendingq_count = 0;
581 break;
582 default:
583 break;
584 }
585 return status;
510} 586}
511 587
588/**
589 * ap_sm_read(): Receive pending reply messages from an AP device.
590 * @ap_dev: pointer to the AP device
591 *
592 * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
593 */
594static enum ap_wait ap_sm_read(struct ap_device *ap_dev)
595{
596 struct ap_queue_status status;
597
598 status = ap_sm_recv(ap_dev);
599 switch (status.response_code) {
600 case AP_RESPONSE_NORMAL:
601 if (ap_dev->queue_count > 0)
602 return AP_WAIT_AGAIN;
603 ap_dev->state = AP_STATE_IDLE;
604 return AP_WAIT_NONE;
605 case AP_RESPONSE_NO_PENDING_REPLY:
606 if (ap_dev->queue_count > 0)
607 return AP_WAIT_INTERRUPT;
608 ap_dev->state = AP_STATE_IDLE;
609 return AP_WAIT_NONE;
610 default:
611 ap_dev->state = AP_STATE_BORKED;
612 return AP_WAIT_NONE;
613 }
614}
512 615
513/** 616/**
514 * ap_query_queue(): Check if an AP queue is available. 617 * ap_sm_write(): Send messages from the request queue to an AP device.
515 * @qid: The AP queue number 618 * @ap_dev: pointer to the AP device
516 * @queue_depth: Pointer to queue depth value 619 *
517 * @device_type: Pointer to device type value 620 * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
518 */ 621 */
519static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type) 622static enum ap_wait ap_sm_write(struct ap_device *ap_dev)
520{ 623{
521 struct ap_queue_status status; 624 struct ap_queue_status status;
522 int t_depth, t_device_type; 625 struct ap_message *ap_msg;
523 626
524 status = ap_test_queue(qid, &t_depth, &t_device_type); 627 if (ap_dev->requestq_count <= 0)
628 return AP_WAIT_NONE;
629 /* Start the next request on the queue. */
630 ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
631 status = __ap_send(ap_dev->qid, ap_msg->psmid,
632 ap_msg->message, ap_msg->length, ap_msg->special);
525 switch (status.response_code) { 633 switch (status.response_code) {
526 case AP_RESPONSE_NORMAL: 634 case AP_RESPONSE_NORMAL:
527 *queue_depth = t_depth + 1; 635 atomic_inc(&ap_poll_requests);
528 *device_type = t_device_type; 636 ap_dev->queue_count++;
529 return 0; 637 if (ap_dev->queue_count == 1)
530 case AP_RESPONSE_Q_NOT_AVAIL: 638 mod_timer(&ap_dev->timeout,
531 case AP_RESPONSE_DECONFIGURED: 639 jiffies + ap_dev->drv->request_timeout);
532 case AP_RESPONSE_CHECKSTOPPED: 640 list_move_tail(&ap_msg->list, &ap_dev->pendingq);
533 case AP_RESPONSE_INVALID_ADDRESS: 641 ap_dev->requestq_count--;
534 return -ENODEV; 642 ap_dev->pendingq_count++;
643 if (ap_dev->queue_count < ap_dev->queue_depth) {
644 ap_dev->state = AP_STATE_WORKING;
645 return AP_WAIT_AGAIN;
646 }
647 /* fall through */
648 case AP_RESPONSE_Q_FULL:
649 ap_dev->state = AP_STATE_QUEUE_FULL;
650 return AP_WAIT_INTERRUPT;
535 case AP_RESPONSE_RESET_IN_PROGRESS: 651 case AP_RESPONSE_RESET_IN_PROGRESS:
536 case AP_RESPONSE_OTHERWISE_CHANGED: 652 ap_dev->state = AP_STATE_RESET_WAIT;
537 case AP_RESPONSE_BUSY: 653 return AP_WAIT_TIMEOUT;
538 return -EBUSY; 654 case AP_RESPONSE_MESSAGE_TOO_BIG:
655 case AP_RESPONSE_REQ_FAC_NOT_INST:
656 list_del_init(&ap_msg->list);
657 ap_dev->requestq_count--;
658 ap_msg->rc = -EINVAL;
659 ap_msg->receive(ap_dev, ap_msg, NULL);
660 return AP_WAIT_AGAIN;
539 default: 661 default:
540 BUG(); 662 ap_dev->state = AP_STATE_BORKED;
663 return AP_WAIT_NONE;
541 } 664 }
542} 665}
543 666
544/** 667/**
545 * ap_init_queue(): Reset an AP queue. 668 * ap_sm_read_write(): Send and receive messages to/from an AP device.
669 * @ap_dev: pointer to the AP device
670 *
671 * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
672 */
673static enum ap_wait ap_sm_read_write(struct ap_device *ap_dev)
674{
675 return min(ap_sm_read(ap_dev), ap_sm_write(ap_dev));
676}
677
678/**
679 * ap_sm_reset(): Reset an AP queue.
546 * @qid: The AP queue number 680 * @qid: The AP queue number
547 * 681 *
548 * Submit the Reset command to an AP queue. 682 * Submit the Reset command to an AP queue.
549 * Since the reset is asynchron set the state to 'RESET_IN_PROGRESS'
550 * and check later via ap_poll_queue() if the reset is done.
551 */ 683 */
552static int ap_init_queue(struct ap_device *ap_dev) 684static enum ap_wait ap_sm_reset(struct ap_device *ap_dev)
553{ 685{
554 struct ap_queue_status status; 686 struct ap_queue_status status;
555 687
556 status = ap_reset_queue(ap_dev->qid); 688 status = ap_reset_queue(ap_dev->qid);
557 switch (status.response_code) { 689 switch (status.response_code) {
558 case AP_RESPONSE_NORMAL: 690 case AP_RESPONSE_NORMAL:
559 ap_dev->interrupt = AP_INTR_DISABLED;
560 ap_dev->reset = AP_RESET_IN_PROGRESS;
561 return 0;
562 case AP_RESPONSE_RESET_IN_PROGRESS: 691 case AP_RESPONSE_RESET_IN_PROGRESS:
692 ap_dev->state = AP_STATE_RESET_WAIT;
693 ap_dev->interrupt = AP_INTR_DISABLED;
694 return AP_WAIT_TIMEOUT;
563 case AP_RESPONSE_BUSY: 695 case AP_RESPONSE_BUSY:
564 return -EBUSY; 696 return AP_WAIT_TIMEOUT;
565 case AP_RESPONSE_Q_NOT_AVAIL: 697 case AP_RESPONSE_Q_NOT_AVAIL:
566 case AP_RESPONSE_DECONFIGURED: 698 case AP_RESPONSE_DECONFIGURED:
567 case AP_RESPONSE_CHECKSTOPPED: 699 case AP_RESPONSE_CHECKSTOPPED:
568 default: 700 default:
569 return -ENODEV; 701 ap_dev->state = AP_STATE_BORKED;
702 return AP_WAIT_NONE;
570 } 703 }
571} 704}
572 705
573/** 706/**
574 * ap_increase_queue_count(): Arm request timeout. 707 * ap_sm_reset_wait(): Test queue for completion of the reset operation
575 * @ap_dev: Pointer to an AP device. 708 * @ap_dev: pointer to the AP device
576 * 709 *
577 * Arm request timeout if an AP device was idle and a new request is submitted. 710 * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
578 */ 711 */
579static void ap_increase_queue_count(struct ap_device *ap_dev) 712static enum ap_wait ap_sm_reset_wait(struct ap_device *ap_dev)
580{ 713{
581 int timeout = ap_dev->drv->request_timeout; 714 struct ap_queue_status status;
715 unsigned long info;
582 716
583 ap_dev->queue_count++; 717 if (ap_dev->queue_count > 0)
584 if (ap_dev->queue_count == 1) { 718 /* Try to read a completed message and get the status */
585 mod_timer(&ap_dev->timeout, jiffies + timeout); 719 status = ap_sm_recv(ap_dev);
586 ap_dev->reset = AP_RESET_ARMED; 720 else
721 /* Get the status with TAPQ */
722 status = ap_test_queue(ap_dev->qid, &info);
723
724 switch (status.response_code) {
725 case AP_RESPONSE_NORMAL:
726 if (ap_using_interrupts() &&
727 ap_queue_enable_interruption(ap_dev,
728 ap_airq.lsi_ptr) == 0)
729 ap_dev->state = AP_STATE_SETIRQ_WAIT;
730 else
731 ap_dev->state = (ap_dev->queue_count > 0) ?
732 AP_STATE_WORKING : AP_STATE_IDLE;
733 return AP_WAIT_AGAIN;
734 case AP_RESPONSE_BUSY:
735 case AP_RESPONSE_RESET_IN_PROGRESS:
736 return AP_WAIT_TIMEOUT;
737 case AP_RESPONSE_Q_NOT_AVAIL:
738 case AP_RESPONSE_DECONFIGURED:
739 case AP_RESPONSE_CHECKSTOPPED:
740 default:
741 ap_dev->state = AP_STATE_BORKED;
742 return AP_WAIT_NONE;
587 } 743 }
588} 744}
589 745
590/** 746/**
591 * ap_decrease_queue_count(): Decrease queue count. 747 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
592 * @ap_dev: Pointer to an AP device. 748 * @ap_dev: pointer to the AP device
593 * 749 *
594 * If AP device is still alive, re-schedule request timeout if there are still 750 * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
595 * pending requests.
596 */ 751 */
597static void ap_decrease_queue_count(struct ap_device *ap_dev) 752static enum ap_wait ap_sm_setirq_wait(struct ap_device *ap_dev)
598{ 753{
599 int timeout = ap_dev->drv->request_timeout; 754 struct ap_queue_status status;
755 unsigned long info;
600 756
601 ap_dev->queue_count--;
602 if (ap_dev->queue_count > 0) 757 if (ap_dev->queue_count > 0)
603 mod_timer(&ap_dev->timeout, jiffies + timeout); 758 /* Try to read a completed message and get the status */
759 status = ap_sm_recv(ap_dev);
604 else 760 else
605 /* 761 /* Get the status with TAPQ */
606 * The timeout timer should to be disabled now - since 762 status = ap_test_queue(ap_dev->qid, &info);
607 * del_timer_sync() is very expensive, we just tell via the 763
608 * reset flag to ignore the pending timeout timer. 764 if (status.int_enabled == 1) {
609 */ 765 /* Irqs are now enabled */
610 ap_dev->reset = AP_RESET_IGNORE; 766 ap_dev->interrupt = AP_INTR_ENABLED;
767 ap_dev->state = (ap_dev->queue_count > 0) ?
768 AP_STATE_WORKING : AP_STATE_IDLE;
769 }
770
771 switch (status.response_code) {
772 case AP_RESPONSE_NORMAL:
773 if (ap_dev->queue_count > 0)
774 return AP_WAIT_AGAIN;
775 /* fallthrough */
776 case AP_RESPONSE_NO_PENDING_REPLY:
777 return AP_WAIT_TIMEOUT;
778 default:
779 ap_dev->state = AP_STATE_BORKED;
780 return AP_WAIT_NONE;
781 }
782}
783
784/*
785 * AP state machine jump table
786 */
787ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
788 [AP_STATE_RESET_START] = {
789 [AP_EVENT_POLL] = ap_sm_reset,
790 [AP_EVENT_TIMEOUT] = ap_sm_nop,
791 },
792 [AP_STATE_RESET_WAIT] = {
793 [AP_EVENT_POLL] = ap_sm_reset_wait,
794 [AP_EVENT_TIMEOUT] = ap_sm_nop,
795 },
796 [AP_STATE_SETIRQ_WAIT] = {
797 [AP_EVENT_POLL] = ap_sm_setirq_wait,
798 [AP_EVENT_TIMEOUT] = ap_sm_nop,
799 },
800 [AP_STATE_IDLE] = {
801 [AP_EVENT_POLL] = ap_sm_write,
802 [AP_EVENT_TIMEOUT] = ap_sm_nop,
803 },
804 [AP_STATE_WORKING] = {
805 [AP_EVENT_POLL] = ap_sm_read_write,
806 [AP_EVENT_TIMEOUT] = ap_sm_reset,
807 },
808 [AP_STATE_QUEUE_FULL] = {
809 [AP_EVENT_POLL] = ap_sm_read,
810 [AP_EVENT_TIMEOUT] = ap_sm_reset,
811 },
812 [AP_STATE_SUSPEND_WAIT] = {
813 [AP_EVENT_POLL] = ap_sm_read,
814 [AP_EVENT_TIMEOUT] = ap_sm_nop,
815 },
816 [AP_STATE_BORKED] = {
817 [AP_EVENT_POLL] = ap_sm_nop,
818 [AP_EVENT_TIMEOUT] = ap_sm_nop,
819 },
820};
821
822static inline enum ap_wait ap_sm_event(struct ap_device *ap_dev,
823 enum ap_event event)
824{
825 return ap_jumptable[ap_dev->state][event](ap_dev);
826}
827
828static inline enum ap_wait ap_sm_event_loop(struct ap_device *ap_dev,
829 enum ap_event event)
830{
831 enum ap_wait wait;
832
833 while ((wait = ap_sm_event(ap_dev, event)) == AP_WAIT_AGAIN)
834 ;
835 return wait;
836}
837
838/**
839 * ap_request_timeout(): Handling of request timeouts
840 * @data: Holds the AP device.
841 *
842 * Handles request timeouts.
843 */
844static void ap_request_timeout(unsigned long data)
845{
846 struct ap_device *ap_dev = (struct ap_device *) data;
847
848 if (ap_suspend_flag)
849 return;
850 spin_lock_bh(&ap_dev->lock);
851 ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_TIMEOUT));
852 spin_unlock_bh(&ap_dev->lock);
853}
854
855/**
856 * ap_poll_timeout(): AP receive polling for finished AP requests.
857 * @unused: Unused pointer.
858 *
859 * Schedules the AP tasklet using a high resolution timer.
860 */
861static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
862{
863 if (!ap_suspend_flag)
864 tasklet_schedule(&ap_tasklet);
865 return HRTIMER_NORESTART;
866}
867
868/**
869 * ap_interrupt_handler() - Schedule ap_tasklet on interrupt
870 * @airq: pointer to adapter interrupt descriptor
871 */
872static void ap_interrupt_handler(struct airq_struct *airq)
873{
874 inc_irq_stat(IRQIO_APB);
875 if (!ap_suspend_flag)
876 tasklet_schedule(&ap_tasklet);
877}
878
879/**
880 * ap_tasklet_fn(): Tasklet to poll all AP devices.
881 * @dummy: Unused variable
882 *
883 * Poll all AP devices on the bus.
884 */
885static void ap_tasklet_fn(unsigned long dummy)
886{
887 struct ap_device *ap_dev;
888 enum ap_wait wait = AP_WAIT_NONE;
889
890 /* Reset the indicator if interrupts are used. Thus new interrupts can
891 * be received. Doing it in the beginning of the tasklet is therefor
892 * important that no requests on any AP get lost.
893 */
894 if (ap_using_interrupts())
895 xchg(ap_airq.lsi_ptr, 0);
896
897 spin_lock(&ap_device_list_lock);
898 list_for_each_entry(ap_dev, &ap_device_list, list) {
899 spin_lock_bh(&ap_dev->lock);
900 wait = min(wait, ap_sm_event_loop(ap_dev, AP_EVENT_POLL));
901 spin_unlock_bh(&ap_dev->lock);
902 }
903 spin_unlock(&ap_device_list_lock);
904 ap_sm_wait(wait);
905}
906
907/**
908 * ap_poll_thread(): Thread that polls for finished requests.
909 * @data: Unused pointer
910 *
911 * AP bus poll thread. The purpose of this thread is to poll for
912 * finished requests in a loop if there is a "free" cpu - that is
913 * a cpu that doesn't have anything better to do. The polling stops
914 * as soon as there is another task or if all messages have been
915 * delivered.
916 */
917static int ap_poll_thread(void *data)
918{
919 DECLARE_WAITQUEUE(wait, current);
920
921 set_user_nice(current, MAX_NICE);
922 set_freezable();
923 while (!kthread_should_stop()) {
924 add_wait_queue(&ap_poll_wait, &wait);
925 set_current_state(TASK_INTERRUPTIBLE);
926 if (ap_suspend_flag ||
927 atomic_read(&ap_poll_requests) <= 0) {
928 schedule();
929 try_to_freeze();
930 }
931 set_current_state(TASK_RUNNING);
932 remove_wait_queue(&ap_poll_wait, &wait);
933 if (need_resched()) {
934 schedule();
935 try_to_freeze();
936 continue;
937 }
938 ap_tasklet_fn(0);
939 } while (!kthread_should_stop());
940 return 0;
941}
942
943static int ap_poll_thread_start(void)
944{
945 int rc;
946
947 if (ap_using_interrupts() || ap_poll_kthread)
948 return 0;
949 mutex_lock(&ap_poll_thread_mutex);
950 ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
951 rc = PTR_RET(ap_poll_kthread);
952 if (rc)
953 ap_poll_kthread = NULL;
954 mutex_unlock(&ap_poll_thread_mutex);
955 return rc;
956}
957
958static void ap_poll_thread_stop(void)
959{
960 if (!ap_poll_kthread)
961 return;
962 mutex_lock(&ap_poll_thread_mutex);
963 kthread_stop(ap_poll_kthread);
964 ap_poll_kthread = NULL;
965 mutex_unlock(&ap_poll_thread_mutex);
611} 966}
612 967
968/**
969 * ap_queue_message(): Queue a request to an AP device.
970 * @ap_dev: The AP device to queue the message to
971 * @ap_msg: The message that is to be added
972 */
973void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
974{
975 /* For asynchronous message handling a valid receive-callback
976 * is required. */
977 BUG_ON(!ap_msg->receive);
978
979 spin_lock_bh(&ap_dev->lock);
980 /* Queue the message. */
981 list_add_tail(&ap_msg->list, &ap_dev->requestq);
982 ap_dev->requestq_count++;
983 ap_dev->total_request_count++;
984 /* Send/receive as many request from the queue as possible. */
985 ap_sm_wait(ap_sm_event_loop(ap_dev, AP_EVENT_POLL));
986 spin_unlock_bh(&ap_dev->lock);
987}
988EXPORT_SYMBOL(ap_queue_message);
989
990/**
991 * ap_cancel_message(): Cancel a crypto request.
992 * @ap_dev: The AP device that has the message queued
993 * @ap_msg: The message that is to be removed
994 *
995 * Cancel a crypto request. This is done by removing the request
996 * from the device pending or request queue. Note that the
997 * request stays on the AP queue. When it finishes the message
998 * reply will be discarded because the psmid can't be found.
999 */
1000void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1001{
1002 struct ap_message *tmp;
1003
1004 spin_lock_bh(&ap_dev->lock);
1005 if (!list_empty(&ap_msg->list)) {
1006 list_for_each_entry(tmp, &ap_dev->pendingq, list)
1007 if (tmp->psmid == ap_msg->psmid) {
1008 ap_dev->pendingq_count--;
1009 goto found;
1010 }
1011 ap_dev->requestq_count--;
1012found:
1013 list_del_init(&ap_msg->list);
1014 }
1015 spin_unlock_bh(&ap_dev->lock);
1016}
1017EXPORT_SYMBOL(ap_cancel_message);
1018
613/* 1019/*
614 * AP device related attributes. 1020 * AP device related attributes.
615 */ 1021 */
@@ -690,21 +1096,17 @@ static ssize_t ap_reset_show(struct device *dev,
690 int rc = 0; 1096 int rc = 0;
691 1097
692 spin_lock_bh(&ap_dev->lock); 1098 spin_lock_bh(&ap_dev->lock);
693 switch (ap_dev->reset) { 1099 switch (ap_dev->state) {
694 case AP_RESET_IGNORE: 1100 case AP_STATE_RESET_START:
695 rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n"); 1101 case AP_STATE_RESET_WAIT:
1102 rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n");
696 break; 1103 break;
697 case AP_RESET_ARMED: 1104 case AP_STATE_WORKING:
1105 case AP_STATE_QUEUE_FULL:
698 rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n"); 1106 rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
699 break; 1107 break;
700 case AP_RESET_DO:
701 rc = snprintf(buf, PAGE_SIZE, "Reset Timer expired.\n");
702 break;
703 case AP_RESET_IN_PROGRESS:
704 rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n");
705 break;
706 default: 1108 default:
707 break; 1109 rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
708 } 1110 }
709 spin_unlock_bh(&ap_dev->lock); 1111 spin_unlock_bh(&ap_dev->lock);
710 return rc; 1112 return rc;
@@ -719,17 +1121,12 @@ static ssize_t ap_interrupt_show(struct device *dev,
719 int rc = 0; 1121 int rc = 0;
720 1122
721 spin_lock_bh(&ap_dev->lock); 1123 spin_lock_bh(&ap_dev->lock);
722 switch (ap_dev->interrupt) { 1124 if (ap_dev->state == AP_STATE_SETIRQ_WAIT)
723 case AP_INTR_DISABLED:
724 rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
725 break;
726 case AP_INTR_ENABLED:
727 rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
728 break;
729 case AP_INTR_IN_PROGRESS:
730 rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n"); 1125 rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
731 break; 1126 else if (ap_dev->interrupt == AP_INTR_ENABLED)
732 } 1127 rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
1128 else
1129 rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
733 spin_unlock_bh(&ap_dev->lock); 1130 spin_unlock_bh(&ap_dev->lock);
734 return rc; 1131 return rc;
735} 1132}
@@ -823,99 +1220,95 @@ static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
823 return retval; 1220 return retval;
824} 1221}
825 1222
826static int ap_bus_suspend(struct device *dev, pm_message_t state) 1223static int ap_dev_suspend(struct device *dev, pm_message_t state)
827{ 1224{
828 struct ap_device *ap_dev = to_ap_dev(dev); 1225 struct ap_device *ap_dev = to_ap_dev(dev);
829 unsigned long flags;
830
831 if (!ap_suspend_flag) {
832 ap_suspend_flag = 1;
833
834 /* Disable scanning for devices, thus we do not want to scan
835 * for them after removing.
836 */
837 del_timer_sync(&ap_config_timer);
838 if (ap_work_queue != NULL) {
839 destroy_workqueue(ap_work_queue);
840 ap_work_queue = NULL;
841 }
842 1226
843 tasklet_disable(&ap_tasklet);
844 }
845 /* Poll on the device until all requests are finished. */ 1227 /* Poll on the device until all requests are finished. */
846 do {
847 flags = 0;
848 spin_lock_bh(&ap_dev->lock);
849 __ap_poll_device(ap_dev, &flags);
850 spin_unlock_bh(&ap_dev->lock);
851 } while ((flags & 1) || (flags & 2));
852
853 spin_lock_bh(&ap_dev->lock); 1228 spin_lock_bh(&ap_dev->lock);
854 ap_dev->unregistered = 1; 1229 ap_dev->state = AP_STATE_SUSPEND_WAIT;
1230 while (ap_sm_event(ap_dev, AP_EVENT_POLL) != AP_WAIT_NONE)
1231 ;
1232 ap_dev->state = AP_STATE_BORKED;
855 spin_unlock_bh(&ap_dev->lock); 1233 spin_unlock_bh(&ap_dev->lock);
1234 return 0;
1235}
856 1236
1237static int ap_dev_resume(struct device *dev)
1238{
857 return 0; 1239 return 0;
858} 1240}
859 1241
860static int ap_bus_resume(struct device *dev) 1242static void ap_bus_suspend(void)
1243{
1244 ap_suspend_flag = 1;
1245 /*
1246 * Disable scanning for devices, thus we do not want to scan
1247 * for them after removing.
1248 */
1249 flush_work(&ap_scan_work);
1250 tasklet_disable(&ap_tasklet);
1251}
1252
1253static int __ap_devices_unregister(struct device *dev, void *dummy)
1254{
1255 device_unregister(dev);
1256 return 0;
1257}
1258
1259static void ap_bus_resume(void)
861{ 1260{
862 struct ap_device *ap_dev = to_ap_dev(dev);
863 int rc; 1261 int rc;
864 1262
865 if (ap_suspend_flag) { 1263 /* Unconditionally remove all AP devices */
866 ap_suspend_flag = 0; 1264 bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_devices_unregister);
867 if (ap_interrupts_available()) { 1265 /* Reset thin interrupt setting */
868 if (!ap_using_interrupts()) { 1266 if (ap_interrupts_available() && !ap_using_interrupts()) {
869 rc = register_adapter_interrupt(&ap_airq); 1267 rc = register_adapter_interrupt(&ap_airq);
870 ap_airq_flag = (rc == 0); 1268 ap_airq_flag = (rc == 0);
871 }
872 } else {
873 if (ap_using_interrupts()) {
874 unregister_adapter_interrupt(&ap_airq);
875 ap_airq_flag = 0;
876 }
877 }
878 ap_query_configuration();
879 if (!user_set_domain) {
880 ap_domain_index = -1;
881 ap_select_domain();
882 }
883 init_timer(&ap_config_timer);
884 ap_config_timer.function = ap_config_timeout;
885 ap_config_timer.data = 0;
886 ap_config_timer.expires = jiffies + ap_config_time * HZ;
887 add_timer(&ap_config_timer);
888 ap_work_queue = create_singlethread_workqueue("kapwork");
889 if (!ap_work_queue)
890 return -ENOMEM;
891 tasklet_enable(&ap_tasklet);
892 if (!ap_using_interrupts())
893 ap_schedule_poll_timer();
894 else
895 tasklet_schedule(&ap_tasklet);
896 if (ap_thread_flag)
897 rc = ap_poll_thread_start();
898 else
899 rc = 0;
900 } else
901 rc = 0;
902 if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) {
903 spin_lock_bh(&ap_dev->lock);
904 ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid),
905 ap_domain_index);
906 spin_unlock_bh(&ap_dev->lock);
907 } 1269 }
908 queue_work(ap_work_queue, &ap_config_work); 1270 if (!ap_interrupts_available() && ap_using_interrupts()) {
1271 unregister_adapter_interrupt(&ap_airq);
1272 ap_airq_flag = 0;
1273 }
1274 /* Reset domain */
1275 if (!user_set_domain)
1276 ap_domain_index = -1;
1277 /* Get things going again */
1278 ap_suspend_flag = 0;
1279 if (ap_airq_flag)
1280 xchg(ap_airq.lsi_ptr, 0);
1281 tasklet_enable(&ap_tasklet);
1282 queue_work(system_long_wq, &ap_scan_work);
1283}
909 1284
910 return rc; 1285static int ap_power_event(struct notifier_block *this, unsigned long event,
1286 void *ptr)
1287{
1288 switch (event) {
1289 case PM_HIBERNATION_PREPARE:
1290 case PM_SUSPEND_PREPARE:
1291 ap_bus_suspend();
1292 break;
1293 case PM_POST_HIBERNATION:
1294 case PM_POST_SUSPEND:
1295 ap_bus_resume();
1296 break;
1297 default:
1298 break;
1299 }
1300 return NOTIFY_DONE;
911} 1301}
1302static struct notifier_block ap_power_notifier = {
1303 .notifier_call = ap_power_event,
1304};
912 1305
913static struct bus_type ap_bus_type = { 1306static struct bus_type ap_bus_type = {
914 .name = "ap", 1307 .name = "ap",
915 .match = &ap_bus_match, 1308 .match = &ap_bus_match,
916 .uevent = &ap_uevent, 1309 .uevent = &ap_uevent,
917 .suspend = ap_bus_suspend, 1310 .suspend = ap_dev_suspend,
918 .resume = ap_bus_resume 1311 .resume = ap_dev_resume,
919}; 1312};
920 1313
921static int ap_device_probe(struct device *dev) 1314static int ap_device_probe(struct device *dev)
@@ -925,21 +1318,9 @@ static int ap_device_probe(struct device *dev)
925 int rc; 1318 int rc;
926 1319
927 ap_dev->drv = ap_drv; 1320 ap_dev->drv = ap_drv;
928
929 spin_lock_bh(&ap_device_list_lock);
930 list_add(&ap_dev->list, &ap_device_list);
931 spin_unlock_bh(&ap_device_list_lock);
932
933 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; 1321 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
934 if (rc) { 1322 if (rc)
935 spin_lock_bh(&ap_device_list_lock); 1323 ap_dev->drv = NULL;
936 list_del_init(&ap_dev->list);
937 spin_unlock_bh(&ap_device_list_lock);
938 } else {
939 if (ap_dev->reset == AP_RESET_IN_PROGRESS ||
940 ap_dev->interrupt == AP_INTR_IN_PROGRESS)
941 __ap_schedule_poll_timer();
942 }
943 return rc; 1324 return rc;
944} 1325}
945 1326
@@ -956,12 +1337,14 @@ static void __ap_flush_queue(struct ap_device *ap_dev)
956 list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) { 1337 list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
957 list_del_init(&ap_msg->list); 1338 list_del_init(&ap_msg->list);
958 ap_dev->pendingq_count--; 1339 ap_dev->pendingq_count--;
959 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); 1340 ap_msg->rc = -EAGAIN;
1341 ap_msg->receive(ap_dev, ap_msg, NULL);
960 } 1342 }
961 list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) { 1343 list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
962 list_del_init(&ap_msg->list); 1344 list_del_init(&ap_msg->list);
963 ap_dev->requestq_count--; 1345 ap_dev->requestq_count--;
964 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); 1346 ap_msg->rc = -EAGAIN;
1347 ap_msg->receive(ap_dev, ap_msg, NULL);
965 } 1348 }
966} 1349}
967 1350
@@ -991,6 +1374,11 @@ static int ap_device_remove(struct device *dev)
991 return 0; 1374 return 0;
992} 1375}
993 1376
1377static void ap_device_release(struct device *dev)
1378{
1379 kfree(to_ap_dev(dev));
1380}
1381
994int ap_driver_register(struct ap_driver *ap_drv, struct module *owner, 1382int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
995 char *name) 1383 char *name)
996{ 1384{
@@ -1013,60 +1401,16 @@ EXPORT_SYMBOL(ap_driver_unregister);
1013 1401
1014void ap_bus_force_rescan(void) 1402void ap_bus_force_rescan(void)
1015{ 1403{
1016 /* reconfigure the AP bus rescan timer. */ 1404 if (ap_suspend_flag)
1017 mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ); 1405 return;
1018 /* processing a asynchronous bus rescan */ 1406 /* processing a asynchronous bus rescan */
1019 queue_work(ap_work_queue, &ap_config_work); 1407 del_timer(&ap_config_timer);
1020 flush_work(&ap_config_work); 1408 queue_work(system_long_wq, &ap_scan_work);
1409 flush_work(&ap_scan_work);
1021} 1410}
1022EXPORT_SYMBOL(ap_bus_force_rescan); 1411EXPORT_SYMBOL(ap_bus_force_rescan);
1023 1412
1024/* 1413/*
1025 * ap_test_config(): helper function to extract the nrth bit
1026 * within the unsigned int array field.
1027 */
1028static inline int ap_test_config(unsigned int *field, unsigned int nr)
1029{
1030 if (nr > 0xFFu)
1031 return 0;
1032 return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
1033}
1034
1035/*
1036 * ap_test_config_card_id(): Test, whether an AP card ID is configured.
1037 * @id AP card ID
1038 *
1039 * Returns 0 if the card is not configured
1040 * 1 if the card is configured or
1041 * if the configuration information is not available
1042 */
1043static inline int ap_test_config_card_id(unsigned int id)
1044{
1045 if (!ap_configuration)
1046 return 1;
1047 return ap_test_config(ap_configuration->apm, id);
1048}
1049
1050/*
1051 * ap_test_config_domain(): Test, whether an AP usage domain is configured.
1052 * @domain AP usage domain ID
1053 *
1054 * Returns 0 if the usage domain is not configured
1055 * 1 if the usage domain is configured or
1056 * if the configuration information is not available
1057 */
1058static inline int ap_test_config_domain(unsigned int domain)
1059{
1060 if (!ap_configuration) /* QCI not supported */
1061 if (domain < 16)
1062 return 1; /* then domains 0...15 are configured */
1063 else
1064 return 0;
1065 else
1066 return ap_test_config(ap_configuration->aqm, domain);
1067}
1068
1069/*
1070 * AP bus attributes. 1414 * AP bus attributes.
1071 */ 1415 */
1072static ssize_t ap_domain_show(struct bus_type *bus, char *buf) 1416static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
@@ -1078,21 +1422,20 @@ static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
1078 1422
1079static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf) 1423static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
1080{ 1424{
1081 if (ap_configuration != NULL) { /* QCI not supported */ 1425 if (!ap_configuration) /* QCI not supported */
1082 if (test_facility(76)) { /* format 1 - 256 bit domain field */ 1426 return snprintf(buf, PAGE_SIZE, "not supported\n");
1083 return snprintf(buf, PAGE_SIZE, 1427 if (!test_facility(76))
1084 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", 1428 /* format 0 - 16 bit domain field */
1429 return snprintf(buf, PAGE_SIZE, "%08x%08x\n",
1430 ap_configuration->adm[0],
1431 ap_configuration->adm[1]);
1432 /* format 1 - 256 bit domain field */
1433 return snprintf(buf, PAGE_SIZE,
1434 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
1085 ap_configuration->adm[0], ap_configuration->adm[1], 1435 ap_configuration->adm[0], ap_configuration->adm[1],
1086 ap_configuration->adm[2], ap_configuration->adm[3], 1436 ap_configuration->adm[2], ap_configuration->adm[3],
1087 ap_configuration->adm[4], ap_configuration->adm[5], 1437 ap_configuration->adm[4], ap_configuration->adm[5],
1088 ap_configuration->adm[6], ap_configuration->adm[7]); 1438 ap_configuration->adm[6], ap_configuration->adm[7]);
1089 } else { /* format 0 - 16 bit domain field */
1090 return snprintf(buf, PAGE_SIZE, "%08x%08x\n",
1091 ap_configuration->adm[0], ap_configuration->adm[1]);
1092 }
1093 } else {
1094 return snprintf(buf, PAGE_SIZE, "not supported\n");
1095 }
1096} 1439}
1097 1440
1098static BUS_ATTR(ap_control_domain_mask, 0444, 1441static BUS_ATTR(ap_control_domain_mask, 0444,
@@ -1119,11 +1462,7 @@ static ssize_t ap_config_time_store(struct bus_type *bus,
1119 if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120) 1462 if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
1120 return -EINVAL; 1463 return -EINVAL;
1121 ap_config_time = time; 1464 ap_config_time = time;
1122 if (!timer_pending(&ap_config_timer) || 1465 mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
1123 !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
1124 ap_config_timer.expires = jiffies + ap_config_time * HZ;
1125 add_timer(&ap_config_timer);
1126 }
1127 return count; 1466 return count;
1128} 1467}
1129 1468
@@ -1144,9 +1483,8 @@ static ssize_t ap_poll_thread_store(struct bus_type *bus,
1144 if (flag) { 1483 if (flag) {
1145 rc = ap_poll_thread_start(); 1484 rc = ap_poll_thread_start();
1146 if (rc) 1485 if (rc)
1147 return rc; 1486 count = rc;
1148 } 1487 } else
1149 else
1150 ap_poll_thread_stop(); 1488 ap_poll_thread_stop();
1151 return count; 1489 return count;
1152} 1490}
@@ -1184,35 +1522,12 @@ static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
1184 1522
1185static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf) 1523static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf)
1186{ 1524{
1187 ap_qid_t qid; 1525 int max_domain_id;
1188 int i, nd, max_domain_id = -1; 1526
1189 unsigned long fbits; 1527 if (ap_configuration)
1190 1528 max_domain_id = ap_max_domain_id ? : -1;
1191 if (ap_configuration) { 1529 else
1192 if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS) {
1193 for (i = 0; i < AP_DEVICES; i++) {
1194 if (!ap_test_config_card_id(i))
1195 continue;
1196 qid = AP_MKQID(i, ap_domain_index);
1197 fbits = ap_query_facilities(qid);
1198 if (fbits & (1UL << 57)) {
1199 /* the N bit is 0, Nd field is filled */
1200 nd = (int)((fbits & 0x00FF0000UL)>>16);
1201 if (nd > 0)
1202 max_domain_id = nd;
1203 else
1204 max_domain_id = 15;
1205 } else {
1206 /* N bit is 1, max 16 domains */
1207 max_domain_id = 15;
1208 }
1209 break;
1210 }
1211 }
1212 } else {
1213 /* no APXA support, older machines with max 16 domains */
1214 max_domain_id = 15; 1530 max_domain_id = 15;
1215 }
1216 return snprintf(buf, PAGE_SIZE, "%d\n", max_domain_id); 1531 return snprintf(buf, PAGE_SIZE, "%d\n", max_domain_id);
1217} 1532}
1218 1533
@@ -1230,44 +1545,22 @@ static struct bus_attribute *const ap_bus_attrs[] = {
1230}; 1545};
1231 1546
1232/** 1547/**
1233 * ap_query_configuration(): Query AP configuration information.
1234 *
1235 * Query information of installed cards and configured domains from AP.
1236 */
1237static void ap_query_configuration(void)
1238{
1239 if (ap_configuration_available()) {
1240 if (!ap_configuration)
1241 ap_configuration =
1242 kzalloc(sizeof(struct ap_config_info),
1243 GFP_KERNEL);
1244 if (ap_configuration)
1245 __ap_query_configuration(ap_configuration);
1246 } else
1247 ap_configuration = NULL;
1248}
1249
1250/**
1251 * ap_select_domain(): Select an AP domain. 1548 * ap_select_domain(): Select an AP domain.
1252 * 1549 *
1253 * Pick one of the 16 AP domains. 1550 * Pick one of the 16 AP domains.
1254 */ 1551 */
1255static int ap_select_domain(void) 1552static int ap_select_domain(void)
1256{ 1553{
1257 int queue_depth, device_type, count, max_count, best_domain; 1554 int count, max_count, best_domain;
1258 ap_qid_t qid; 1555 struct ap_queue_status status;
1259 int rc, i, j; 1556 int i, j;
1260
1261 /* IF APXA isn't installed, only 16 domains could be defined */
1262 if (!ap_configuration->ap_extended && (ap_domain_index > 15))
1263 return -EINVAL;
1264 1557
1265 /* 1558 /*
1266 * We want to use a single domain. Either the one specified with 1559 * We want to use a single domain. Either the one specified with
1267 * the "domain=" parameter or the domain with the maximum number 1560 * the "domain=" parameter or the domain with the maximum number
1268 * of devices. 1561 * of devices.
1269 */ 1562 */
1270 if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS) 1563 if (ap_domain_index >= 0)
1271 /* Domain has already been selected. */ 1564 /* Domain has already been selected. */
1272 return 0; 1565 return 0;
1273 best_domain = -1; 1566 best_domain = -1;
@@ -1279,9 +1572,8 @@ static int ap_select_domain(void)
1279 for (j = 0; j < AP_DEVICES; j++) { 1572 for (j = 0; j < AP_DEVICES; j++) {
1280 if (!ap_test_config_card_id(j)) 1573 if (!ap_test_config_card_id(j))
1281 continue; 1574 continue;
1282 qid = AP_MKQID(j, i); 1575 status = ap_test_queue(AP_MKQID(j, i), NULL);
1283 rc = ap_query_queue(qid, &queue_depth, &device_type); 1576 if (status.response_code != AP_RESPONSE_NORMAL)
1284 if (rc)
1285 continue; 1577 continue;
1286 count++; 1578 count++;
1287 } 1579 }
@@ -1298,109 +1590,6 @@ static int ap_select_domain(void)
1298} 1590}
1299 1591
1300/** 1592/**
1301 * ap_probe_device_type(): Find the device type of an AP.
1302 * @ap_dev: pointer to the AP device.
1303 *
1304 * Find the device type if query queue returned a device type of 0.
1305 */
1306static int ap_probe_device_type(struct ap_device *ap_dev)
1307{
1308 static unsigned char msg[] = {
1309 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
1310 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1311 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
1312 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1313 0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
1314 0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
1315 0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
1316 0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
1317 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1318 0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
1319 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1320 0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
1321 0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
1322 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1323 0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
1324 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1325 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1326 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1327 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1328 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1329 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1330 0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
1331 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1332 0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
1333 0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
1334 0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
1335 0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
1336 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
1337 0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
1338 0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
1339 0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
1340 0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
1341 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
1342 0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
1343 0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
1344 0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
1345 0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
1346 0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
1347 0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
1348 0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
1349 0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
1350 0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
1351 0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
1352 0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
1353 0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
1354 };
1355 struct ap_queue_status status;
1356 unsigned long long psmid;
1357 char *reply;
1358 int rc, i;
1359
1360 reply = (void *) get_zeroed_page(GFP_KERNEL);
1361 if (!reply) {
1362 rc = -ENOMEM;
1363 goto out;
1364 }
1365
1366 status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
1367 msg, sizeof(msg), 0);
1368 if (status.response_code != AP_RESPONSE_NORMAL) {
1369 rc = -ENODEV;
1370 goto out_free;
1371 }
1372
1373 /* Wait for the test message to complete. */
1374 for (i = 0; i < 6; i++) {
1375 msleep(300);
1376 status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
1377 if (status.response_code == AP_RESPONSE_NORMAL &&
1378 psmid == 0x0102030405060708ULL)
1379 break;
1380 }
1381 if (i < 6) {
1382 /* Got an answer. */
1383 if (reply[0] == 0x00 && reply[1] == 0x86)
1384 ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
1385 else
1386 ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
1387 rc = 0;
1388 } else
1389 rc = -ENODEV;
1390
1391out_free:
1392 free_page((unsigned long) reply);
1393out:
1394 return rc;
1395}
1396
1397static void ap_interrupt_handler(struct airq_struct *airq)
1398{
1399 inc_irq_stat(IRQIO_APB);
1400 tasklet_schedule(&ap_tasklet);
1401}
1402
1403/**
1404 * __ap_scan_bus(): Scan the AP bus. 1593 * __ap_scan_bus(): Scan the AP bus.
1405 * @dev: Pointer to device 1594 * @dev: Pointer to device
1406 * @data: Pointer to data 1595 * @data: Pointer to data
@@ -1412,49 +1601,38 @@ static int __ap_scan_bus(struct device *dev, void *data)
1412 return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data; 1601 return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
1413} 1602}
1414 1603
1415static void ap_device_release(struct device *dev)
1416{
1417 struct ap_device *ap_dev = to_ap_dev(dev);
1418
1419 kfree(ap_dev);
1420}
1421
1422static void ap_scan_bus(struct work_struct *unused) 1604static void ap_scan_bus(struct work_struct *unused)
1423{ 1605{
1424 struct ap_device *ap_dev; 1606 struct ap_device *ap_dev;
1425 struct device *dev; 1607 struct device *dev;
1426 ap_qid_t qid; 1608 ap_qid_t qid;
1427 int queue_depth = 0, device_type = 0; 1609 int queue_depth = 0, device_type = 0;
1428 unsigned int device_functions; 1610 unsigned int device_functions = 0;
1429 int rc, i; 1611 int rc, i, borked;
1430 1612
1431 ap_query_configuration(); 1613 ap_query_configuration();
1432 if (ap_select_domain() != 0) { 1614 if (ap_select_domain() != 0)
1433 return; 1615 goto out;
1434 } 1616
1435 for (i = 0; i < AP_DEVICES; i++) { 1617 for (i = 0; i < AP_DEVICES; i++) {
1436 qid = AP_MKQID(i, ap_domain_index); 1618 qid = AP_MKQID(i, ap_domain_index);
1437 dev = bus_find_device(&ap_bus_type, NULL, 1619 dev = bus_find_device(&ap_bus_type, NULL,
1438 (void *)(unsigned long)qid, 1620 (void *)(unsigned long)qid,
1439 __ap_scan_bus); 1621 __ap_scan_bus);
1440 if (ap_test_config_card_id(i)) 1622 rc = ap_query_queue(qid, &queue_depth, &device_type,
1441 rc = ap_query_queue(qid, &queue_depth, &device_type); 1623 &device_functions);
1442 else
1443 rc = -ENODEV;
1444 if (dev) { 1624 if (dev) {
1445 ap_dev = to_ap_dev(dev); 1625 ap_dev = to_ap_dev(dev);
1446 spin_lock_bh(&ap_dev->lock); 1626 spin_lock_bh(&ap_dev->lock);
1447 if (rc == -ENODEV || ap_dev->unregistered) { 1627 if (rc == -ENODEV)
1448 spin_unlock_bh(&ap_dev->lock); 1628 ap_dev->state = AP_STATE_BORKED;
1449 if (ap_dev->unregistered) 1629 borked = ap_dev->state == AP_STATE_BORKED;
1450 i--;
1451 device_unregister(dev);
1452 put_device(dev);
1453 continue;
1454 }
1455 spin_unlock_bh(&ap_dev->lock); 1630 spin_unlock_bh(&ap_dev->lock);
1631 if (borked) /* Remove broken device */
1632 device_unregister(dev);
1456 put_device(dev); 1633 put_device(dev);
1457 continue; 1634 if (!borked)
1635 continue;
1458 } 1636 }
1459 if (rc) 1637 if (rc)
1460 continue; 1638 continue;
@@ -1462,525 +1640,72 @@ static void ap_scan_bus(struct work_struct *unused)
1462 if (!ap_dev) 1640 if (!ap_dev)
1463 break; 1641 break;
1464 ap_dev->qid = qid; 1642 ap_dev->qid = qid;
1465 rc = ap_init_queue(ap_dev); 1643 ap_dev->state = AP_STATE_RESET_START;
1466 if ((rc != 0) && (rc != -EBUSY)) { 1644 ap_dev->interrupt = AP_INTR_DISABLED;
1467 kfree(ap_dev);
1468 continue;
1469 }
1470 ap_dev->queue_depth = queue_depth; 1645 ap_dev->queue_depth = queue_depth;
1471 ap_dev->unregistered = 1; 1646 ap_dev->raw_hwtype = device_type;
1647 ap_dev->device_type = device_type;
1648 ap_dev->functions = device_functions;
1472 spin_lock_init(&ap_dev->lock); 1649 spin_lock_init(&ap_dev->lock);
1473 INIT_LIST_HEAD(&ap_dev->pendingq); 1650 INIT_LIST_HEAD(&ap_dev->pendingq);
1474 INIT_LIST_HEAD(&ap_dev->requestq); 1651 INIT_LIST_HEAD(&ap_dev->requestq);
1475 INIT_LIST_HEAD(&ap_dev->list); 1652 INIT_LIST_HEAD(&ap_dev->list);
1476 setup_timer(&ap_dev->timeout, ap_request_timeout, 1653 setup_timer(&ap_dev->timeout, ap_request_timeout,
1477 (unsigned long) ap_dev); 1654 (unsigned long) ap_dev);
1478 switch (device_type) {
1479 case 0:
1480 /* device type probing for old cards */
1481 if (ap_probe_device_type(ap_dev)) {
1482 kfree(ap_dev);
1483 continue;
1484 }
1485 break;
1486 default:
1487 ap_dev->device_type = device_type;
1488 }
1489 ap_dev->raw_hwtype = device_type;
1490
1491 rc = ap_query_functions(qid, &device_functions);
1492 if (!rc)
1493 ap_dev->functions = device_functions;
1494 else
1495 ap_dev->functions = 0u;
1496 1655
1497 ap_dev->device.bus = &ap_bus_type; 1656 ap_dev->device.bus = &ap_bus_type;
1498 ap_dev->device.parent = ap_root_device; 1657 ap_dev->device.parent = ap_root_device;
1499 if (dev_set_name(&ap_dev->device, "card%02x", 1658 rc = dev_set_name(&ap_dev->device, "card%02x",
1500 AP_QID_DEVICE(ap_dev->qid))) { 1659 AP_QID_DEVICE(ap_dev->qid));
1660 if (rc) {
1501 kfree(ap_dev); 1661 kfree(ap_dev);
1502 continue; 1662 continue;
1503 } 1663 }
1664 /* Add to list of devices */
1665 spin_lock_bh(&ap_device_list_lock);
1666 list_add(&ap_dev->list, &ap_device_list);
1667 spin_unlock_bh(&ap_device_list_lock);
1668 /* Start with a device reset */
1669 spin_lock_bh(&ap_dev->lock);
1670 ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_POLL));
1671 spin_unlock_bh(&ap_dev->lock);
1672 /* Register device */
1504 ap_dev->device.release = ap_device_release; 1673 ap_dev->device.release = ap_device_release;
1505 rc = device_register(&ap_dev->device); 1674 rc = device_register(&ap_dev->device);
1506 if (rc) { 1675 if (rc) {
1676 spin_lock_bh(&ap_dev->lock);
1677 list_del_init(&ap_dev->list);
1678 spin_unlock_bh(&ap_dev->lock);
1507 put_device(&ap_dev->device); 1679 put_device(&ap_dev->device);
1508 continue; 1680 continue;
1509 } 1681 }
1510 /* Add device attributes. */ 1682 /* Add device attributes. */
1511 rc = sysfs_create_group(&ap_dev->device.kobj, 1683 rc = sysfs_create_group(&ap_dev->device.kobj,
1512 &ap_dev_attr_group); 1684 &ap_dev_attr_group);
1513 if (!rc) { 1685 if (rc) {
1514 spin_lock_bh(&ap_dev->lock);
1515 ap_dev->unregistered = 0;
1516 spin_unlock_bh(&ap_dev->lock);
1517 }
1518 else
1519 device_unregister(&ap_dev->device); 1686 device_unregister(&ap_dev->device);
1520 }
1521}
1522
1523static void
1524ap_config_timeout(unsigned long ptr)
1525{
1526 queue_work(ap_work_queue, &ap_config_work);
1527 ap_config_timer.expires = jiffies + ap_config_time * HZ;
1528 add_timer(&ap_config_timer);
1529}
1530
1531/**
1532 * ap_poll_read(): Receive pending reply messages from an AP device.
1533 * @ap_dev: pointer to the AP device
1534 * @flags: pointer to control flags, bit 2^0 is set if another poll is
1535 * required, bit 2^1 is set if the poll timer needs to get armed
1536 *
1537 * Returns 0 if the device is still present, -ENODEV if not.
1538 */
1539static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
1540{
1541 struct ap_queue_status status;
1542 struct ap_message *ap_msg;
1543
1544 if (ap_dev->queue_count <= 0)
1545 return 0;
1546 status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
1547 ap_dev->reply->message, ap_dev->reply->length);
1548 switch (status.response_code) {
1549 case AP_RESPONSE_NORMAL:
1550 ap_dev->interrupt = status.int_enabled;
1551 atomic_dec(&ap_poll_requests);
1552 ap_decrease_queue_count(ap_dev);
1553 list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
1554 if (ap_msg->psmid != ap_dev->reply->psmid)
1555 continue;
1556 list_del_init(&ap_msg->list);
1557 ap_dev->pendingq_count--;
1558 ap_msg->receive(ap_dev, ap_msg, ap_dev->reply);
1559 break;
1560 }
1561 if (ap_dev->queue_count > 0)
1562 *flags |= 1;
1563 break;
1564 case AP_RESPONSE_NO_PENDING_REPLY:
1565 ap_dev->interrupt = status.int_enabled;
1566 if (status.queue_empty) {
1567 /* The card shouldn't forget requests but who knows. */
1568 atomic_sub(ap_dev->queue_count, &ap_poll_requests);
1569 ap_dev->queue_count = 0;
1570 list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
1571 ap_dev->requestq_count += ap_dev->pendingq_count;
1572 ap_dev->pendingq_count = 0;
1573 } else
1574 *flags |= 2;
1575 break;
1576 default:
1577 return -ENODEV;
1578 }
1579 return 0;
1580}
1581
1582/**
1583 * ap_poll_write(): Send messages from the request queue to an AP device.
1584 * @ap_dev: pointer to the AP device
1585 * @flags: pointer to control flags, bit 2^0 is set if another poll is
1586 * required, bit 2^1 is set if the poll timer needs to get armed
1587 *
1588 * Returns 0 if the device is still present, -ENODEV if not.
1589 */
1590static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
1591{
1592 struct ap_queue_status status;
1593 struct ap_message *ap_msg;
1594
1595 if (ap_dev->requestq_count <= 0 ||
1596 (ap_dev->queue_count >= ap_dev->queue_depth) ||
1597 (ap_dev->reset == AP_RESET_IN_PROGRESS))
1598 return 0;
1599 /* Start the next request on the queue. */
1600 ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
1601 status = __ap_send(ap_dev->qid, ap_msg->psmid,
1602 ap_msg->message, ap_msg->length, ap_msg->special);
1603 switch (status.response_code) {
1604 case AP_RESPONSE_NORMAL:
1605 atomic_inc(&ap_poll_requests);
1606 ap_increase_queue_count(ap_dev);
1607 list_move_tail(&ap_msg->list, &ap_dev->pendingq);
1608 ap_dev->requestq_count--;
1609 ap_dev->pendingq_count++;
1610 if (ap_dev->queue_count < ap_dev->queue_depth &&
1611 ap_dev->requestq_count > 0)
1612 *flags |= 1;
1613 *flags |= 2;
1614 break;
1615 case AP_RESPONSE_RESET_IN_PROGRESS:
1616 __ap_schedule_poll_timer();
1617 case AP_RESPONSE_Q_FULL:
1618 *flags |= 2;
1619 break;
1620 case AP_RESPONSE_MESSAGE_TOO_BIG:
1621 case AP_RESPONSE_REQ_FAC_NOT_INST:
1622 return -EINVAL;
1623 default:
1624 return -ENODEV;
1625 }
1626 return 0;
1627}
1628
1629/**
1630 * ap_poll_queue(): Poll AP device for pending replies and send new messages.
1631 * Check if the queue has a pending reset. In case it's done re-enable
1632 * interrupts, otherwise reschedule the poll_timer for another attempt.
1633 * @ap_dev: pointer to the bus device
1634 * @flags: pointer to control flags, bit 2^0 is set if another poll is
1635 * required, bit 2^1 is set if the poll timer needs to get armed
1636 *
1637 * Poll AP device for pending replies and send new messages. If either
1638 * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
1639 * Returns 0.
1640 */
1641static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
1642{
1643 int rc, depth, type;
1644 struct ap_queue_status status;
1645
1646
1647 if (ap_dev->reset == AP_RESET_IN_PROGRESS) {
1648 status = ap_test_queue(ap_dev->qid, &depth, &type);
1649 switch (status.response_code) {
1650 case AP_RESPONSE_NORMAL:
1651 ap_dev->reset = AP_RESET_IGNORE;
1652 if (ap_using_interrupts()) {
1653 rc = ap_queue_enable_interruption(
1654 ap_dev, ap_airq.lsi_ptr);
1655 if (!rc)
1656 ap_dev->interrupt = AP_INTR_IN_PROGRESS;
1657 else if (rc == -ENODEV) {
1658 pr_err("Registering adapter interrupts for "
1659 "AP %d failed\n", AP_QID_DEVICE(ap_dev->qid));
1660 return rc;
1661 }
1662 }
1663 /* fall through */
1664 case AP_RESPONSE_BUSY:
1665 case AP_RESPONSE_RESET_IN_PROGRESS:
1666 *flags |= AP_POLL_AFTER_TIMEOUT;
1667 break;
1668 case AP_RESPONSE_Q_NOT_AVAIL:
1669 case AP_RESPONSE_DECONFIGURED:
1670 case AP_RESPONSE_CHECKSTOPPED:
1671 return -ENODEV;
1672 default:
1673 break;
1674 }
1675 }
1676
1677 if ((ap_dev->reset != AP_RESET_IN_PROGRESS) &&
1678 (ap_dev->interrupt == AP_INTR_IN_PROGRESS)) {
1679 status = ap_test_queue(ap_dev->qid, &depth, &type);
1680 if (ap_using_interrupts()) {
1681 if (status.int_enabled == 1)
1682 ap_dev->interrupt = AP_INTR_ENABLED;
1683 else
1684 *flags |= AP_POLL_AFTER_TIMEOUT;
1685 } else
1686 ap_dev->interrupt = AP_INTR_DISABLED;
1687 }
1688
1689 rc = ap_poll_read(ap_dev, flags);
1690 if (rc)
1691 return rc;
1692 return ap_poll_write(ap_dev, flags);
1693}
1694
1695/**
1696 * __ap_queue_message(): Queue a message to a device.
1697 * @ap_dev: pointer to the AP device
1698 * @ap_msg: the message to be queued
1699 *
1700 * Queue a message to a device. Returns 0 if successful.
1701 */
1702static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1703{
1704 struct ap_queue_status status;
1705
1706 if (list_empty(&ap_dev->requestq) &&
1707 (ap_dev->queue_count < ap_dev->queue_depth) &&
1708 (ap_dev->reset != AP_RESET_IN_PROGRESS)) {
1709 status = __ap_send(ap_dev->qid, ap_msg->psmid,
1710 ap_msg->message, ap_msg->length,
1711 ap_msg->special);
1712 switch (status.response_code) {
1713 case AP_RESPONSE_NORMAL:
1714 list_add_tail(&ap_msg->list, &ap_dev->pendingq);
1715 atomic_inc(&ap_poll_requests);
1716 ap_dev->pendingq_count++;
1717 ap_increase_queue_count(ap_dev);
1718 ap_dev->total_request_count++;
1719 break;
1720 case AP_RESPONSE_Q_FULL:
1721 case AP_RESPONSE_RESET_IN_PROGRESS:
1722 list_add_tail(&ap_msg->list, &ap_dev->requestq);
1723 ap_dev->requestq_count++;
1724 ap_dev->total_request_count++;
1725 return -EBUSY;
1726 case AP_RESPONSE_REQ_FAC_NOT_INST:
1727 case AP_RESPONSE_MESSAGE_TOO_BIG:
1728 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
1729 return -EINVAL;
1730 default: /* Device is gone. */
1731 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
1732 return -ENODEV;
1733 }
1734 } else {
1735 list_add_tail(&ap_msg->list, &ap_dev->requestq);
1736 ap_dev->requestq_count++;
1737 ap_dev->total_request_count++;
1738 return -EBUSY;
1739 }
1740 ap_schedule_poll_timer();
1741 return 0;
1742}
1743
1744void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1745{
1746 unsigned long flags;
1747 int rc;
1748
1749 /* For asynchronous message handling a valid receive-callback
1750 * is required. */
1751 BUG_ON(!ap_msg->receive);
1752
1753 spin_lock_bh(&ap_dev->lock);
1754 if (!ap_dev->unregistered) {
1755 /* Make room on the queue by polling for finished requests. */
1756 rc = ap_poll_queue(ap_dev, &flags);
1757 if (!rc)
1758 rc = __ap_queue_message(ap_dev, ap_msg);
1759 if (!rc)
1760 wake_up(&ap_poll_wait);
1761 if (rc == -ENODEV)
1762 ap_dev->unregistered = 1;
1763 } else {
1764 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
1765 rc = -ENODEV;
1766 }
1767 spin_unlock_bh(&ap_dev->lock);
1768 if (rc == -ENODEV)
1769 device_unregister(&ap_dev->device);
1770}
1771EXPORT_SYMBOL(ap_queue_message);
1772
1773/**
1774 * ap_cancel_message(): Cancel a crypto request.
1775 * @ap_dev: The AP device that has the message queued
1776 * @ap_msg: The message that is to be removed
1777 *
1778 * Cancel a crypto request. This is done by removing the request
1779 * from the device pending or request queue. Note that the
1780 * request stays on the AP queue. When it finishes the message
1781 * reply will be discarded because the psmid can't be found.
1782 */
1783void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1784{
1785 struct ap_message *tmp;
1786
1787 spin_lock_bh(&ap_dev->lock);
1788 if (!list_empty(&ap_msg->list)) {
1789 list_for_each_entry(tmp, &ap_dev->pendingq, list)
1790 if (tmp->psmid == ap_msg->psmid) {
1791 ap_dev->pendingq_count--;
1792 goto found;
1793 }
1794 ap_dev->requestq_count--;
1795 found:
1796 list_del_init(&ap_msg->list);
1797 }
1798 spin_unlock_bh(&ap_dev->lock);
1799}
1800EXPORT_SYMBOL(ap_cancel_message);
1801
1802/**
1803 * ap_poll_timeout(): AP receive polling for finished AP requests.
1804 * @unused: Unused pointer.
1805 *
1806 * Schedules the AP tasklet using a high resolution timer.
1807 */
1808static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
1809{
1810 tasklet_schedule(&ap_tasklet);
1811 return HRTIMER_NORESTART;
1812}
1813
1814/**
1815 * ap_reset(): Reset a not responding AP device.
1816 * @ap_dev: Pointer to the AP device
1817 *
1818 * Reset a not responding AP device and move all requests from the
1819 * pending queue to the request queue.
1820 */
1821static void ap_reset(struct ap_device *ap_dev, unsigned long *flags)
1822{
1823 int rc;
1824
1825 atomic_sub(ap_dev->queue_count, &ap_poll_requests);
1826 ap_dev->queue_count = 0;
1827 list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
1828 ap_dev->requestq_count += ap_dev->pendingq_count;
1829 ap_dev->pendingq_count = 0;
1830 rc = ap_init_queue(ap_dev);
1831 if (rc == -ENODEV)
1832 ap_dev->unregistered = 1;
1833 else
1834 *flags |= AP_POLL_AFTER_TIMEOUT;
1835}
1836
1837static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
1838{
1839 if (!ap_dev->unregistered) {
1840 if (ap_poll_queue(ap_dev, flags))
1841 ap_dev->unregistered = 1;
1842 if (ap_dev->reset == AP_RESET_DO)
1843 ap_reset(ap_dev, flags);
1844 }
1845 return 0;
1846}
1847
1848/**
1849 * ap_poll_all(): Poll all AP devices.
1850 * @dummy: Unused variable
1851 *
1852 * Poll all AP devices on the bus in a round robin fashion. Continue
1853 * polling until bit 2^0 of the control flags is not set. If bit 2^1
1854 * of the control flags has been set arm the poll timer.
1855 */
1856static void ap_poll_all(unsigned long dummy)
1857{
1858 unsigned long flags;
1859 struct ap_device *ap_dev;
1860
1861 /* Reset the indicator if interrupts are used. Thus new interrupts can
1862 * be received. Doing it in the beginning of the tasklet is therefor
1863 * important that no requests on any AP get lost.
1864 */
1865 if (ap_using_interrupts())
1866 xchg(ap_airq.lsi_ptr, 0);
1867 do {
1868 flags = 0;
1869 spin_lock(&ap_device_list_lock);
1870 list_for_each_entry(ap_dev, &ap_device_list, list) {
1871 spin_lock(&ap_dev->lock);
1872 __ap_poll_device(ap_dev, &flags);
1873 spin_unlock(&ap_dev->lock);
1874 }
1875 spin_unlock(&ap_device_list_lock);
1876 } while (flags & AP_POLL_IMMEDIATELY);
1877 if (flags & AP_POLL_AFTER_TIMEOUT)
1878 __ap_schedule_poll_timer();
1879}
1880
1881/**
1882 * ap_poll_thread(): Thread that polls for finished requests.
1883 * @data: Unused pointer
1884 *
1885 * AP bus poll thread. The purpose of this thread is to poll for
1886 * finished requests in a loop if there is a "free" cpu - that is
1887 * a cpu that doesn't have anything better to do. The polling stops
1888 * as soon as there is another task or if all messages have been
1889 * delivered.
1890 */
1891static int ap_poll_thread(void *data)
1892{
1893 DECLARE_WAITQUEUE(wait, current);
1894 unsigned long flags;
1895 int requests;
1896 struct ap_device *ap_dev;
1897
1898 set_user_nice(current, MAX_NICE);
1899 while (1) {
1900 if (ap_suspend_flag)
1901 return 0;
1902 if (need_resched()) {
1903 schedule();
1904 continue; 1687 continue;
1905 } 1688 }
1906 add_wait_queue(&ap_poll_wait, &wait);
1907 set_current_state(TASK_INTERRUPTIBLE);
1908 if (kthread_should_stop())
1909 break;
1910 requests = atomic_read(&ap_poll_requests);
1911 if (requests <= 0)
1912 schedule();
1913 set_current_state(TASK_RUNNING);
1914 remove_wait_queue(&ap_poll_wait, &wait);
1915
1916 flags = 0;
1917 spin_lock_bh(&ap_device_list_lock);
1918 list_for_each_entry(ap_dev, &ap_device_list, list) {
1919 spin_lock(&ap_dev->lock);
1920 __ap_poll_device(ap_dev, &flags);
1921 spin_unlock(&ap_dev->lock);
1922 }
1923 spin_unlock_bh(&ap_device_list_lock);
1924 } 1689 }
1925 set_current_state(TASK_RUNNING); 1690out:
1926 remove_wait_queue(&ap_poll_wait, &wait); 1691 mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
1927 return 0;
1928}
1929
1930static int ap_poll_thread_start(void)
1931{
1932 int rc;
1933
1934 if (ap_using_interrupts() || ap_suspend_flag)
1935 return 0;
1936 mutex_lock(&ap_poll_thread_mutex);
1937 if (!ap_poll_kthread) {
1938 ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
1939 rc = PTR_RET(ap_poll_kthread);
1940 if (rc)
1941 ap_poll_kthread = NULL;
1942 }
1943 else
1944 rc = 0;
1945 mutex_unlock(&ap_poll_thread_mutex);
1946 return rc;
1947}
1948
1949static void ap_poll_thread_stop(void)
1950{
1951 mutex_lock(&ap_poll_thread_mutex);
1952 if (ap_poll_kthread) {
1953 kthread_stop(ap_poll_kthread);
1954 ap_poll_kthread = NULL;
1955 }
1956 mutex_unlock(&ap_poll_thread_mutex);
1957} 1692}
1958 1693
1959/** 1694static void ap_config_timeout(unsigned long ptr)
1960 * ap_request_timeout(): Handling of request timeouts
1961 * @data: Holds the AP device.
1962 *
1963 * Handles request timeouts.
1964 */
1965static void ap_request_timeout(unsigned long data)
1966{ 1695{
1967 struct ap_device *ap_dev = (struct ap_device *) data; 1696 if (ap_suspend_flag)
1968 1697 return;
1969 if (ap_dev->reset == AP_RESET_ARMED) { 1698 queue_work(system_long_wq, &ap_scan_work);
1970 ap_dev->reset = AP_RESET_DO;
1971
1972 if (ap_using_interrupts())
1973 tasklet_schedule(&ap_tasklet);
1974 }
1975} 1699}
1976 1700
1977static void ap_reset_domain(void) 1701static void ap_reset_domain(void)
1978{ 1702{
1979 int i; 1703 int i;
1980 1704
1981 if ((ap_domain_index != -1) && (ap_test_config_domain(ap_domain_index))) 1705 if (ap_domain_index == -1 || !ap_test_config_domain(ap_domain_index))
1982 for (i = 0; i < AP_DEVICES; i++) 1706 return;
1983 ap_reset_queue(AP_MKQID(i, ap_domain_index)); 1707 for (i = 0; i < AP_DEVICES; i++)
1708 ap_reset_queue(AP_MKQID(i, ap_domain_index));
1984} 1709}
1985 1710
1986static void ap_reset_all(void) 1711static void ap_reset_all(void)
@@ -2009,11 +1734,24 @@ static struct reset_call ap_reset_call = {
2009 */ 1734 */
2010int __init ap_module_init(void) 1735int __init ap_module_init(void)
2011{ 1736{
1737 int max_domain_id;
2012 int rc, i; 1738 int rc, i;
2013 1739
2014 if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) { 1740 if (ap_instructions_available() != 0) {
2015 pr_warning("%d is not a valid cryptographic domain\n", 1741 pr_warn("The hardware system does not support AP instructions\n");
2016 ap_domain_index); 1742 return -ENODEV;
1743 }
1744
1745 /* Get AP configuration data if available */
1746 ap_init_configuration();
1747
1748 if (ap_configuration)
1749 max_domain_id = ap_max_domain_id ? : (AP_DOMAINS - 1);
1750 else
1751 max_domain_id = 15;
1752 if (ap_domain_index < -1 || ap_domain_index > max_domain_id) {
1753 pr_warn("%d is not a valid cryptographic domain\n",
1754 ap_domain_index);
2017 return -EINVAL; 1755 return -EINVAL;
2018 } 1756 }
2019 /* In resume callback we need to know if the user had set the domain. 1757 /* In resume callback we need to know if the user had set the domain.
@@ -2022,11 +1760,6 @@ int __init ap_module_init(void)
2022 if (ap_domain_index >= 0) 1760 if (ap_domain_index >= 0)
2023 user_set_domain = 1; 1761 user_set_domain = 1;
2024 1762
2025 if (ap_instructions_available() != 0) {
2026 pr_warning("The hardware system does not support "
2027 "AP instructions\n");
2028 return -ENODEV;
2029 }
2030 if (ap_interrupts_available()) { 1763 if (ap_interrupts_available()) {
2031 rc = register_adapter_interrupt(&ap_airq); 1764 rc = register_adapter_interrupt(&ap_airq);
2032 ap_airq_flag = (rc == 0); 1765 ap_airq_flag = (rc == 0);
@@ -2050,24 +1783,11 @@ int __init ap_module_init(void)
2050 if (rc) 1783 if (rc)
2051 goto out_bus; 1784 goto out_bus;
2052 1785
2053 ap_work_queue = create_singlethread_workqueue("kapwork");
2054 if (!ap_work_queue) {
2055 rc = -ENOMEM;
2056 goto out_root;
2057 }
2058
2059 ap_query_configuration();
2060 if (ap_select_domain() == 0)
2061 ap_scan_bus(NULL);
2062
2063 /* Setup the AP bus rescan timer. */ 1786 /* Setup the AP bus rescan timer. */
2064 init_timer(&ap_config_timer); 1787 setup_timer(&ap_config_timer, ap_config_timeout, 0);
2065 ap_config_timer.function = ap_config_timeout;
2066 ap_config_timer.data = 0;
2067 ap_config_timer.expires = jiffies + ap_config_time * HZ;
2068 add_timer(&ap_config_timer);
2069 1788
2070 /* Setup the high resultion poll timer. 1789 /*
1790 * Setup the high resultion poll timer.
2071 * If we are running under z/VM adjust polling to z/VM polling rate. 1791 * If we are running under z/VM adjust polling to z/VM polling rate.
2072 */ 1792 */
2073 if (MACHINE_IS_VM) 1793 if (MACHINE_IS_VM)
@@ -2083,13 +1803,18 @@ int __init ap_module_init(void)
2083 goto out_work; 1803 goto out_work;
2084 } 1804 }
2085 1805
1806 rc = register_pm_notifier(&ap_power_notifier);
1807 if (rc)
1808 goto out_pm;
1809
1810 queue_work(system_long_wq, &ap_scan_work);
1811
2086 return 0; 1812 return 0;
2087 1813
1814out_pm:
1815 ap_poll_thread_stop();
2088out_work: 1816out_work:
2089 del_timer_sync(&ap_config_timer);
2090 hrtimer_cancel(&ap_poll_timer); 1817 hrtimer_cancel(&ap_poll_timer);
2091 destroy_workqueue(ap_work_queue);
2092out_root:
2093 root_device_unregister(ap_root_device); 1818 root_device_unregister(ap_root_device);
2094out_bus: 1819out_bus:
2095 while (i--) 1820 while (i--)
@@ -2099,14 +1824,10 @@ out:
2099 unregister_reset_call(&ap_reset_call); 1824 unregister_reset_call(&ap_reset_call);
2100 if (ap_using_interrupts()) 1825 if (ap_using_interrupts())
2101 unregister_adapter_interrupt(&ap_airq); 1826 unregister_adapter_interrupt(&ap_airq);
1827 kfree(ap_configuration);
2102 return rc; 1828 return rc;
2103} 1829}
2104 1830
2105static int __ap_match_all(struct device *dev, void *data)
2106{
2107 return 1;
2108}
2109
2110/** 1831/**
2111 * ap_modules_exit(): The module termination code 1832 * ap_modules_exit(): The module termination code
2112 * 1833 *
@@ -2115,24 +1836,19 @@ static int __ap_match_all(struct device *dev, void *data)
2115void ap_module_exit(void) 1836void ap_module_exit(void)
2116{ 1837{
2117 int i; 1838 int i;
2118 struct device *dev;
2119 1839
2120 ap_reset_domain(); 1840 ap_reset_domain();
2121 ap_poll_thread_stop(); 1841 ap_poll_thread_stop();
2122 del_timer_sync(&ap_config_timer); 1842 del_timer_sync(&ap_config_timer);
2123 hrtimer_cancel(&ap_poll_timer); 1843 hrtimer_cancel(&ap_poll_timer);
2124 destroy_workqueue(ap_work_queue);
2125 tasklet_kill(&ap_tasklet); 1844 tasklet_kill(&ap_tasklet);
2126 while ((dev = bus_find_device(&ap_bus_type, NULL, NULL, 1845 bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_devices_unregister);
2127 __ap_match_all)))
2128 {
2129 device_unregister(dev);
2130 put_device(dev);
2131 }
2132 for (i = 0; ap_bus_attrs[i]; i++) 1846 for (i = 0; ap_bus_attrs[i]; i++)
2133 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); 1847 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
1848 unregister_pm_notifier(&ap_power_notifier);
2134 root_device_unregister(ap_root_device); 1849 root_device_unregister(ap_root_device);
2135 bus_unregister(&ap_bus_type); 1850 bus_unregister(&ap_bus_type);
1851 kfree(ap_configuration);
2136 unregister_reset_call(&ap_reset_call); 1852 unregister_reset_call(&ap_reset_call);
2137 if (ap_using_interrupts()) 1853 if (ap_using_interrupts())
2138 unregister_adapter_interrupt(&ap_airq); 1854 unregister_adapter_interrupt(&ap_airq);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 00468c8d0781..6adcbdf225d1 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -36,9 +36,6 @@
36#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */ 36#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */
37#define AP_POLL_TIME 1 /* Time in ticks between receive polls. */ 37#define AP_POLL_TIME 1 /* Time in ticks between receive polls. */
38 38
39#define AP_POLL_IMMEDIATELY 1 /* continue running poll tasklet */
40#define AP_POLL_AFTER_TIMEOUT 2 /* run poll tasklet again after timout */
41
42extern int ap_domain_index; 39extern int ap_domain_index;
43 40
44/** 41/**
@@ -75,21 +72,9 @@ struct ap_queue_status {
75 unsigned int pad2 : 16; 72 unsigned int pad2 : 16;
76} __packed; 73} __packed;
77 74
78#define AP_QUEUE_STATUS_INVALID \
79 { 1, 1, 1, 0xF, 1, 0xFF, 0xFFFF }
80
81static inline
82int ap_queue_status_invalid_test(struct ap_queue_status *status)
83{
84 struct ap_queue_status invalid = AP_QUEUE_STATUS_INVALID;
85 return !(memcmp(status, &invalid, sizeof(struct ap_queue_status)));
86}
87 75
88#define AP_MAX_BITS 31
89static inline int ap_test_bit(unsigned int *ptr, unsigned int nr) 76static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
90{ 77{
91 if (nr > AP_MAX_BITS)
92 return 0;
93 return (*ptr & (0x80000000u >> nr)) != 0; 78 return (*ptr & (0x80000000u >> nr)) != 0;
94} 79}
95 80
@@ -132,19 +117,45 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
132#define AP_FUNC_APXA 6 117#define AP_FUNC_APXA 6
133 118
134/* 119/*
135 * AP reset flag states
136 */
137#define AP_RESET_IGNORE 0 /* request timeout will be ignored */
138#define AP_RESET_ARMED 1 /* request timeout timer is active */
139#define AP_RESET_DO 2 /* AP reset required */
140#define AP_RESET_IN_PROGRESS 3 /* AP reset in progress */
141
142/*
143 * AP interrupt states 120 * AP interrupt states
144 */ 121 */
145#define AP_INTR_DISABLED 0 /* AP interrupt disabled */ 122#define AP_INTR_DISABLED 0 /* AP interrupt disabled */
146#define AP_INTR_ENABLED 1 /* AP interrupt enabled */ 123#define AP_INTR_ENABLED 1 /* AP interrupt enabled */
147#define AP_INTR_IN_PROGRESS 3 /* AP interrupt in progress */ 124
125/*
126 * AP device states
127 */
128enum ap_state {
129 AP_STATE_RESET_START,
130 AP_STATE_RESET_WAIT,
131 AP_STATE_SETIRQ_WAIT,
132 AP_STATE_IDLE,
133 AP_STATE_WORKING,
134 AP_STATE_QUEUE_FULL,
135 AP_STATE_SUSPEND_WAIT,
136 AP_STATE_BORKED,
137 NR_AP_STATES
138};
139
140/*
141 * AP device events
142 */
143enum ap_event {
144 AP_EVENT_POLL,
145 AP_EVENT_TIMEOUT,
146 NR_AP_EVENTS
147};
148
149/*
150 * AP wait behaviour
151 */
152enum ap_wait {
153 AP_WAIT_AGAIN, /* retry immediately */
154 AP_WAIT_TIMEOUT, /* wait for timeout */
155 AP_WAIT_INTERRUPT, /* wait for thin interrupt (if available) */
156 AP_WAIT_NONE, /* no wait */
157 NR_AP_WAIT
158};
148 159
149struct ap_device; 160struct ap_device;
150struct ap_message; 161struct ap_message;
@@ -163,20 +174,22 @@ struct ap_driver {
163int ap_driver_register(struct ap_driver *, struct module *, char *); 174int ap_driver_register(struct ap_driver *, struct module *, char *);
164void ap_driver_unregister(struct ap_driver *); 175void ap_driver_unregister(struct ap_driver *);
165 176
177typedef enum ap_wait (ap_func_t)(struct ap_device *ap_dev);
178
166struct ap_device { 179struct ap_device {
167 struct device device; 180 struct device device;
168 struct ap_driver *drv; /* Pointer to AP device driver. */ 181 struct ap_driver *drv; /* Pointer to AP device driver. */
169 spinlock_t lock; /* Per device lock. */ 182 spinlock_t lock; /* Per device lock. */
170 struct list_head list; /* private list of all AP devices. */ 183 struct list_head list; /* private list of all AP devices. */
171 184
185 enum ap_state state; /* State of the AP device. */
186
172 ap_qid_t qid; /* AP queue id. */ 187 ap_qid_t qid; /* AP queue id. */
173 int queue_depth; /* AP queue depth.*/ 188 int queue_depth; /* AP queue depth.*/
174 int device_type; /* AP device type. */ 189 int device_type; /* AP device type. */
175 int raw_hwtype; /* AP raw hardware type. */ 190 int raw_hwtype; /* AP raw hardware type. */
176 unsigned int functions; /* AP device function bitfield. */ 191 unsigned int functions; /* AP device function bitfield. */
177 int unregistered; /* marks AP device as unregistered */
178 struct timer_list timeout; /* Timer for request timeouts. */ 192 struct timer_list timeout; /* Timer for request timeouts. */
179 int reset; /* Reset required after req. timeout. */
180 193
181 int interrupt; /* indicate if interrupts are enabled */ 194 int interrupt; /* indicate if interrupts are enabled */
182 int queue_count; /* # messages currently on AP queue. */ 195 int queue_count; /* # messages currently on AP queue. */
@@ -199,6 +212,7 @@ struct ap_message {
199 unsigned long long psmid; /* Message id. */ 212 unsigned long long psmid; /* Message id. */
200 void *message; /* Pointer to message buffer. */ 213 void *message; /* Pointer to message buffer. */
201 size_t length; /* Message length. */ 214 size_t length; /* Message length. */
215 int rc; /* Return code for this message */
202 216
203 void *private; /* ap driver private pointer. */ 217 void *private; /* ap driver private pointer. */
204 unsigned int special:1; /* Used for special commands. */ 218 unsigned int special:1; /* Used for special commands. */
@@ -231,6 +245,7 @@ static inline void ap_init_message(struct ap_message *ap_msg)
231{ 245{
232 ap_msg->psmid = 0; 246 ap_msg->psmid = 0;
233 ap_msg->length = 0; 247 ap_msg->length = 0;
248 ap_msg->rc = 0;
234 ap_msg->special = 0; 249 ap_msg->special = 0;
235 ap_msg->receive = NULL; 250 ap_msg->receive = NULL;
236} 251}
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 4eb45546a3aa..a9603ebbc1f8 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -472,8 +472,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
472 unsigned long long z1, z2, z3; 472 unsigned long long z1, z2, z3;
473 int rc, copied; 473 int rc, copied;
474 474
475 if (crt->outputdatalength < crt->inputdatalength || 475 if (crt->outputdatalength < crt->inputdatalength)
476 (crt->inputdatalength & 1))
477 return -EINVAL; 476 return -EINVAL;
478 /* 477 /*
479 * As long as outputdatalength is big enough, we can set the 478 * As long as outputdatalength is big enough, we can set the
diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h
index 1f42f103c761..ca0cdbe46368 100644
--- a/drivers/s390/crypto/zcrypt_cca_key.h
+++ b/drivers/s390/crypto/zcrypt_cca_key.h
@@ -291,7 +291,7 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt,
291 291
292 memset(key, 0, sizeof(*key)); 292 memset(key, 0, sizeof(*key));
293 293
294 short_len = crt->inputdatalength / 2; 294 short_len = (crt->inputdatalength + 1) / 2;
295 long_len = short_len + 8; 295 long_len = short_len + 8;
296 pad_len = -(3*long_len + 2*short_len) & 7; 296 pad_len = -(3*long_len + 2*short_len) & 7;
297 key_len = 3*long_len + 2*short_len + pad_len + crt->inputdatalength; 297 key_len = 3*long_len + 2*short_len + pad_len + crt->inputdatalength;
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 334e282f255b..71ceee9137a8 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -248,7 +248,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
248 unsigned char *p, *q, *dp, *dq, *u, *inp; 248 unsigned char *p, *q, *dp, *dq, *u, *inp;
249 249
250 mod_len = crt->inputdatalength; 250 mod_len = crt->inputdatalength;
251 short_len = mod_len / 2; 251 short_len = (mod_len + 1) / 2;
252 252
253 /* 253 /*
254 * CEX2A and CEX3A w/o FW update can handle requests up to 254 * CEX2A and CEX3A w/o FW update can handle requests up to
@@ -395,10 +395,8 @@ static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
395 int length; 395 int length;
396 396
397 /* Copy the reply message to the request message buffer. */ 397 /* Copy the reply message to the request message buffer. */
398 if (IS_ERR(reply)) { 398 if (!reply)
399 memcpy(msg->message, &error_reply, sizeof(error_reply)); 399 goto out; /* ap_msg->rc indicates the error */
400 goto out;
401 }
402 t80h = reply->message; 400 t80h = reply->message;
403 if (t80h->type == TYPE80_RSP_CODE) { 401 if (t80h->type == TYPE80_RSP_CODE) {
404 if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A) 402 if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A)
@@ -449,10 +447,12 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
449 init_completion(&work); 447 init_completion(&work);
450 ap_queue_message(zdev->ap_dev, &ap_msg); 448 ap_queue_message(zdev->ap_dev, &ap_msg);
451 rc = wait_for_completion_interruptible(&work); 449 rc = wait_for_completion_interruptible(&work);
452 if (rc == 0) 450 if (rc == 0) {
453 rc = convert_response(zdev, &ap_msg, mex->outputdata, 451 rc = ap_msg.rc;
454 mex->outputdatalength); 452 if (rc == 0)
455 else 453 rc = convert_response(zdev, &ap_msg, mex->outputdata,
454 mex->outputdatalength);
455 } else
456 /* Signal pending. */ 456 /* Signal pending. */
457 ap_cancel_message(zdev->ap_dev, &ap_msg); 457 ap_cancel_message(zdev->ap_dev, &ap_msg);
458out_free: 458out_free:
@@ -493,10 +493,12 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
493 init_completion(&work); 493 init_completion(&work);
494 ap_queue_message(zdev->ap_dev, &ap_msg); 494 ap_queue_message(zdev->ap_dev, &ap_msg);
495 rc = wait_for_completion_interruptible(&work); 495 rc = wait_for_completion_interruptible(&work);
496 if (rc == 0) 496 if (rc == 0) {
497 rc = convert_response(zdev, &ap_msg, crt->outputdata, 497 rc = ap_msg.rc;
498 crt->outputdatalength); 498 if (rc == 0)
499 else 499 rc = convert_response(zdev, &ap_msg, crt->outputdata,
500 crt->outputdatalength);
501 } else
500 /* Signal pending. */ 502 /* Signal pending. */
501 ap_cancel_message(zdev->ap_dev, &ap_msg); 503 ap_cancel_message(zdev->ap_dev, &ap_msg);
502out_free: 504out_free:
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 46b324ce6c7a..74762214193b 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -829,10 +829,8 @@ static void zcrypt_msgtype6_receive(struct ap_device *ap_dev,
829 int length; 829 int length;
830 830
831 /* Copy the reply message to the request message buffer. */ 831 /* Copy the reply message to the request message buffer. */
832 if (IS_ERR(reply)) { 832 if (!reply)
833 memcpy(msg->message, &error_reply, sizeof(error_reply)); 833 goto out; /* ap_msg->rc indicates the error */
834 goto out;
835 }
836 t86r = reply->message; 834 t86r = reply->message;
837 if (t86r->hdr.type == TYPE86_RSP_CODE && 835 if (t86r->hdr.type == TYPE86_RSP_CODE &&
838 t86r->cprbx.cprb_ver_id == 0x02) { 836 t86r->cprbx.cprb_ver_id == 0x02) {
@@ -880,10 +878,8 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_device *ap_dev,
880 int length; 878 int length;
881 879
882 /* Copy the reply message to the request message buffer. */ 880 /* Copy the reply message to the request message buffer. */
883 if (IS_ERR(reply)) { 881 if (!reply)
884 memcpy(msg->message, &error_reply, sizeof(error_reply)); 882 goto out; /* ap_msg->rc indicates the error */
885 goto out;
886 }
887 t86r = reply->message; 883 t86r = reply->message;
888 if (t86r->hdr.type == TYPE86_RSP_CODE && 884 if (t86r->hdr.type == TYPE86_RSP_CODE &&
889 t86r->cprbx.cprb_ver_id == 0x04) { 885 t86r->cprbx.cprb_ver_id == 0x04) {
@@ -935,10 +931,13 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev,
935 init_completion(&resp_type.work); 931 init_completion(&resp_type.work);
936 ap_queue_message(zdev->ap_dev, &ap_msg); 932 ap_queue_message(zdev->ap_dev, &ap_msg);
937 rc = wait_for_completion_interruptible(&resp_type.work); 933 rc = wait_for_completion_interruptible(&resp_type.work);
938 if (rc == 0) 934 if (rc == 0) {
939 rc = convert_response_ica(zdev, &ap_msg, mex->outputdata, 935 rc = ap_msg.rc;
940 mex->outputdatalength); 936 if (rc == 0)
941 else 937 rc = convert_response_ica(zdev, &ap_msg,
938 mex->outputdata,
939 mex->outputdatalength);
940 } else
942 /* Signal pending. */ 941 /* Signal pending. */
943 ap_cancel_message(zdev->ap_dev, &ap_msg); 942 ap_cancel_message(zdev->ap_dev, &ap_msg);
944out_free: 943out_free:
@@ -976,10 +975,13 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_device *zdev,
976 init_completion(&resp_type.work); 975 init_completion(&resp_type.work);
977 ap_queue_message(zdev->ap_dev, &ap_msg); 976 ap_queue_message(zdev->ap_dev, &ap_msg);
978 rc = wait_for_completion_interruptible(&resp_type.work); 977 rc = wait_for_completion_interruptible(&resp_type.work);
979 if (rc == 0) 978 if (rc == 0) {
980 rc = convert_response_ica(zdev, &ap_msg, crt->outputdata, 979 rc = ap_msg.rc;
981 crt->outputdatalength); 980 if (rc == 0)
982 else 981 rc = convert_response_ica(zdev, &ap_msg,
982 crt->outputdata,
983 crt->outputdatalength);
984 } else
983 /* Signal pending. */ 985 /* Signal pending. */
984 ap_cancel_message(zdev->ap_dev, &ap_msg); 986 ap_cancel_message(zdev->ap_dev, &ap_msg);
985out_free: 987out_free:
@@ -1017,9 +1019,11 @@ static long zcrypt_msgtype6_send_cprb(struct zcrypt_device *zdev,
1017 init_completion(&resp_type.work); 1019 init_completion(&resp_type.work);
1018 ap_queue_message(zdev->ap_dev, &ap_msg); 1020 ap_queue_message(zdev->ap_dev, &ap_msg);
1019 rc = wait_for_completion_interruptible(&resp_type.work); 1021 rc = wait_for_completion_interruptible(&resp_type.work);
1020 if (rc == 0) 1022 if (rc == 0) {
1021 rc = convert_response_xcrb(zdev, &ap_msg, xcRB); 1023 rc = ap_msg.rc;
1022 else 1024 if (rc == 0)
1025 rc = convert_response_xcrb(zdev, &ap_msg, xcRB);
1026 } else
1023 /* Signal pending. */ 1027 /* Signal pending. */
1024 ap_cancel_message(zdev->ap_dev, &ap_msg); 1028 ap_cancel_message(zdev->ap_dev, &ap_msg);
1025out_free: 1029out_free:
@@ -1057,9 +1061,12 @@ static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_device *zdev,
1057 init_completion(&resp_type.work); 1061 init_completion(&resp_type.work);
1058 ap_queue_message(zdev->ap_dev, &ap_msg); 1062 ap_queue_message(zdev->ap_dev, &ap_msg);
1059 rc = wait_for_completion_interruptible(&resp_type.work); 1063 rc = wait_for_completion_interruptible(&resp_type.work);
1060 if (rc == 0) 1064 if (rc == 0) {
1061 rc = convert_response_ep11_xcrb(zdev, &ap_msg, xcrb); 1065 rc = ap_msg.rc;
1062 else /* Signal pending. */ 1066 if (rc == 0)
1067 rc = convert_response_ep11_xcrb(zdev, &ap_msg, xcrb);
1068 } else
1069 /* Signal pending. */
1063 ap_cancel_message(zdev->ap_dev, &ap_msg); 1070 ap_cancel_message(zdev->ap_dev, &ap_msg);
1064 1071
1065out_free: 1072out_free:
@@ -1096,9 +1103,11 @@ static long zcrypt_msgtype6_rng(struct zcrypt_device *zdev,
1096 init_completion(&resp_type.work); 1103 init_completion(&resp_type.work);
1097 ap_queue_message(zdev->ap_dev, &ap_msg); 1104 ap_queue_message(zdev->ap_dev, &ap_msg);
1098 rc = wait_for_completion_interruptible(&resp_type.work); 1105 rc = wait_for_completion_interruptible(&resp_type.work);
1099 if (rc == 0) 1106 if (rc == 0) {
1100 rc = convert_response_rng(zdev, &ap_msg, buffer); 1107 rc = ap_msg.rc;
1101 else 1108 if (rc == 0)
1109 rc = convert_response_rng(zdev, &ap_msg, buffer);
1110 } else
1102 /* Signal pending. */ 1111 /* Signal pending. */
1103 ap_cancel_message(zdev->ap_dev, &ap_msg); 1112 ap_cancel_message(zdev->ap_dev, &ap_msg);
1104 kfree(ap_msg.message); 1113 kfree(ap_msg.message);
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
deleted file mode 100644
index 7a743f4c646c..000000000000
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ /dev/null
@@ -1,420 +0,0 @@
1/*
2 * zcrypt 2.1.0
3 *
4 * Copyright IBM Corp. 2001, 2006
5 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com)
7 *
8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * Ralph Wuerthner <rwuerthn@de.ibm.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#define KMSG_COMPONENT "zcrypt"
28#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
29
30#include <linux/module.h>
31#include <linux/slab.h>
32#include <linux/init.h>
33#include <linux/err.h>
34#include <linux/atomic.h>
35#include <asm/uaccess.h>
36
37#include "ap_bus.h"
38#include "zcrypt_api.h"
39#include "zcrypt_error.h"
40#include "zcrypt_pcica.h"
41
42#define PCICA_MIN_MOD_SIZE 1 /* 8 bits */
43#define PCICA_MAX_MOD_SIZE 256 /* 2048 bits */
44
45#define PCICA_SPEED_RATING 2800
46
47#define PCICA_MAX_MESSAGE_SIZE 0x3a0 /* sizeof(struct type4_lcr) */
48#define PCICA_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
49
50#define PCICA_CLEANUP_TIME (15*HZ)
51
52static struct ap_device_id zcrypt_pcica_ids[] = {
53 { AP_DEVICE(AP_DEVICE_TYPE_PCICA) },
54 { /* end of list */ },
55};
56
57MODULE_DEVICE_TABLE(ap, zcrypt_pcica_ids);
58MODULE_AUTHOR("IBM Corporation");
59MODULE_DESCRIPTION("PCICA Cryptographic Coprocessor device driver, "
60 "Copyright IBM Corp. 2001, 2006");
61MODULE_LICENSE("GPL");
62
63static int zcrypt_pcica_probe(struct ap_device *ap_dev);
64static void zcrypt_pcica_remove(struct ap_device *ap_dev);
65static void zcrypt_pcica_receive(struct ap_device *, struct ap_message *,
66 struct ap_message *);
67
68static struct ap_driver zcrypt_pcica_driver = {
69 .probe = zcrypt_pcica_probe,
70 .remove = zcrypt_pcica_remove,
71 .ids = zcrypt_pcica_ids,
72 .request_timeout = PCICA_CLEANUP_TIME,
73};
74
/**
 * Convert a ICAMEX message to a type4 MEX message.
 *
 * Builds either a small (struct type4_sme, modulus <= 1024 bits) or a
 * large (struct type4_lme) mod-expo request in ap_msg->message. All
 * three operands are right-justified inside their fixed-size fields:
 * the data is copied to the tail of each array and the preceding
 * memset() leaves the leading bytes zero (left-padding).
 *
 * @zdev: crypto device pointer (unused here; kept for the common
 *	  converter signature)
 * @ap_msg: AP message; ->message receives the type4 request,
 *	    ->length its size
 * @mex: pointer to user input data
 *
 * Returns 0 on success or -EFAULT.
 */
static int ICAMEX_msg_to_type4MEX_msg(struct zcrypt_device *zdev,
				      struct ap_message *ap_msg,
				      struct ica_rsa_modexpo *mex)
{
	unsigned char *modulus, *exponent, *message;
	int mod_len;

	mod_len = mex->inputdatalength;

	if (mod_len <= 128) {
		/* Small modulus (<= 1024 bits): short mod-expo format. */
		struct type4_sme *sme = ap_msg->message;
		memset(sme, 0, sizeof(*sme));
		ap_msg->length = sizeof(*sme);
		sme->header.msg_fmt = TYPE4_SME_FMT;
		sme->header.msg_len = sizeof(*sme);
		sme->header.msg_type_code = TYPE4_TYPE_CODE;
		sme->header.request_code = TYPE4_REQU_CODE;
		/* Right-justify each operand inside its field. */
		modulus = sme->modulus + sizeof(sme->modulus) - mod_len;
		exponent = sme->exponent + sizeof(sme->exponent) - mod_len;
		message = sme->message + sizeof(sme->message) - mod_len;
	} else {
		/* Large modulus (> 1024 bits): long mod-expo format. */
		struct type4_lme *lme = ap_msg->message;
		memset(lme, 0, sizeof(*lme));
		ap_msg->length = sizeof(*lme);
		lme->header.msg_fmt = TYPE4_LME_FMT;
		lme->header.msg_len = sizeof(*lme);
		lme->header.msg_type_code = TYPE4_TYPE_CODE;
		lme->header.request_code = TYPE4_REQU_CODE;
		modulus = lme->modulus + sizeof(lme->modulus) - mod_len;
		exponent = lme->exponent + sizeof(lme->exponent) - mod_len;
		message = lme->message + sizeof(lme->message) - mod_len;
	}

	if (copy_from_user(modulus, mex->n_modulus, mod_len) ||
	    copy_from_user(exponent, mex->b_key, mod_len) ||
	    copy_from_user(message, mex->inputdata, mod_len))
		return -EFAULT;
	return 0;
}
123
/**
 * Convert a ICACRT message to a type4 CRT message.
 *
 * Builds a small (struct type4_scr) or large (struct type4_lcr) CRT
 * request in ap_msg->message. The five CRT operands and the input
 * are right-justified (left-zero-padded) in their fixed-size fields;
 * p, dp and u use long_len (mod_len/2 + 8) while q and dq use
 * short_len (mod_len/2), matching the 72/64 resp. 136/128 byte field
 * sizes of the type4 CRT formats.
 *
 * @zdev: crypto device pointer (unused here; kept for the common
 *	  converter signature)
 * @ap_msg: AP message; ->message receives the type4 request,
 *	    ->length its size
 * @crt: pointer to user input data
 *
 * Returns 0 on success or -EFAULT.
 */
static int ICACRT_msg_to_type4CRT_msg(struct zcrypt_device *zdev,
				      struct ap_message *ap_msg,
				      struct ica_rsa_modexpo_crt *crt)
{
	unsigned char *p, *q, *dp, *dq, *u, *inp;
	int mod_len, short_len, long_len;

	mod_len = crt->inputdatalength;
	short_len = mod_len / 2;
	long_len = mod_len / 2 + 8;

	if (mod_len <= 128) {
		/* Small modulus: short CRT format. */
		struct type4_scr *scr = ap_msg->message;
		memset(scr, 0, sizeof(*scr));
		ap_msg->length = sizeof(*scr);
		scr->header.msg_type_code = TYPE4_TYPE_CODE;
		scr->header.request_code = TYPE4_REQU_CODE;
		scr->header.msg_fmt = TYPE4_SCR_FMT;
		scr->header.msg_len = sizeof(*scr);
		p = scr->p + sizeof(scr->p) - long_len;
		q = scr->q + sizeof(scr->q) - short_len;
		dp = scr->dp + sizeof(scr->dp) - long_len;
		dq = scr->dq + sizeof(scr->dq) - short_len;
		u = scr->u + sizeof(scr->u) - long_len;
		inp = scr->message + sizeof(scr->message) - mod_len;
	} else {
		/* Large modulus: long CRT format. */
		struct type4_lcr *lcr = ap_msg->message;
		memset(lcr, 0, sizeof(*lcr));
		ap_msg->length = sizeof(*lcr);
		lcr->header.msg_type_code = TYPE4_TYPE_CODE;
		lcr->header.request_code = TYPE4_REQU_CODE;
		lcr->header.msg_fmt = TYPE4_LCR_FMT;
		lcr->header.msg_len = sizeof(*lcr);
		p = lcr->p + sizeof(lcr->p) - long_len;
		q = lcr->q + sizeof(lcr->q) - short_len;
		dp = lcr->dp + sizeof(lcr->dp) - long_len;
		dq = lcr->dq + sizeof(lcr->dq) - short_len;
		u = lcr->u + sizeof(lcr->u) - long_len;
		inp = lcr->message + sizeof(lcr->message) - mod_len;
	}

	if (copy_from_user(p, crt->np_prime, long_len) ||
	    copy_from_user(q, crt->nq_prime, short_len) ||
	    copy_from_user(dp, crt->bp_key, long_len) ||
	    copy_from_user(dq, crt->bq_key, short_len) ||
	    copy_from_user(u, crt->u_mult_inv, long_len) ||
	    copy_from_user(inp, crt->inputdata, mod_len))
		return -EFAULT;
	return 0;
}
183
/**
 * Copy results from a type 84 reply message back to user space.
 *
 * The result is right-justified at the end of the reply, so the copy
 * starts at offset t84h->len - outputdatalength. A reply shorter than
 * header + expected output marks the device offline and asks the
 * distributor to retry elsewhere.
 *
 * @zdev: crypto device pointer
 * @reply: reply AP message.
 * @outputdata: pointer to user output data
 * @outputdatalength: size of user output data
 *
 * Returns 0 on success, -EFAULT on copy failure or -EAGAIN on a
 * short reply.
 */
static int convert_type84(struct zcrypt_device *zdev,
			  struct ap_message *reply,
			  char __user *outputdata,
			  unsigned int outputdatalength)
{
	struct type84_hdr *t84h = reply->message;
	char *data;

	if (t84h->len < sizeof(*t84h) + outputdatalength) {
		/* The result is too short, the PCICA card may not do that.. */
		zdev->online = 0;
		pr_err("Cryptographic device %x failed and was set offline\n",
		       zdev->ap_dev->qid);
		ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
			       zdev->ap_dev->qid, zdev->online, t84h->code);
		return -EAGAIN;	/* repeat the request on a different device. */
	}
	/* The receive callback clamped the copy to this size already. */
	BUG_ON(t84h->len > PCICA_MAX_RESPONSE_SIZE);
	data = reply->message + t84h->len - outputdatalength;
	if (copy_to_user(outputdata, data, outputdatalength))
		return -EFAULT;
	return 0;
}
217
/**
 * Dispatch a PCICA reply by its response type byte.
 *
 * Type 82/88 replies are error messages handled by convert_error();
 * type 84 replies carry the actual result. Anything else marks the
 * device offline and requests a retry on another device.
 *
 * Returns 0 on success or a negative errno (-EFAULT, -EAGAIN, or
 * whatever convert_error() maps the error code to).
 */
static int convert_response(struct zcrypt_device *zdev,
			    struct ap_message *reply,
			    char __user *outputdata,
			    unsigned int outputdatalength)
{
	/* Response type byte is the second byte in the response. */
	switch (((unsigned char *) reply->message)[1]) {
	case TYPE82_RSP_CODE:
	case TYPE88_RSP_CODE:
		return convert_error(zdev, reply);
	case TYPE84_RSP_CODE:
		return convert_type84(zdev, reply,
				      outputdata, outputdatalength);
	default: /* Unknown response type, this should NEVER EVER happen */
		zdev->online = 0;
		pr_err("Cryptographic device %x failed and was set offline\n",
		       zdev->ap_dev->qid);
		ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
			       zdev->ap_dev->qid, zdev->online);
		return -EAGAIN;	/* repeat the request on a different device. */
	}
}
240
/**
 * This function is called from the AP bus code after a crypto request
 * "msg" has finished with the reply message "reply".
 * It is called from tasklet context.
 * @ap_dev: pointer to the AP device
 * @msg: pointer to the AP message
 * @reply: pointer to the AP reply message
 *
 * Copies the reply into the request's own buffer (substituting a
 * canned type82 machine-failure header when the bus reports an error)
 * and then completes the waiter stored in msg->private.
 */
static void zcrypt_pcica_receive(struct ap_device *ap_dev,
				 struct ap_message *msg,
				 struct ap_message *reply)
{
	/* Canned error reply used when the AP bus signals a failure. */
	static struct error_hdr error_reply = {
		.type = TYPE82_RSP_CODE,
		.reply_code = REP82_ERROR_MACHINE_FAILURE,
	};
	struct type84_hdr *t84h;
	int length;

	/* Copy the reply message to the request message buffer. */
	if (IS_ERR(reply)) {
		memcpy(msg->message, &error_reply, sizeof(error_reply));
		goto out;
	}
	t84h = reply->message;
	if (t84h->code == TYPE84_RSP_CODE) {
		/* Clamp to the response buffer size used by this driver. */
		length = min(PCICA_MAX_RESPONSE_SIZE, (int) t84h->len);
		memcpy(msg->message, reply->message, length);
	} else
		/* Not a type84 reply: copy enough for error decoding. */
		memcpy(msg->message, reply->message, sizeof error_reply);
out:
	complete((struct completion *) msg->private);
}
274
275static atomic_t zcrypt_step = ATOMIC_INIT(0);
276
/**
 * The request distributor calls this function if it picked the PCICA
 * device to handle a modexpo request.
 * @zdev: pointer to zcrypt_device structure that identifies the
 *	  PCICA device to the request distributor
 * @mex: pointer to the modexpo request buffer
 *
 * Builds a type4 message, queues it on the AP device and sleeps
 * interruptibly until zcrypt_pcica_receive() completes it. Returns 0
 * on success, -ENOMEM/-EFAULT on setup failure, the result of
 * convert_response(), or a negative value when interrupted by a
 * signal (in which case the queued message is cancelled).
 */
static long zcrypt_pcica_modexpo(struct zcrypt_device *zdev,
				 struct ica_rsa_modexpo *mex)
{
	struct ap_message ap_msg;
	struct completion work;
	int rc;

	ap_init_message(&ap_msg);
	ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
	if (!ap_msg.message)
		return -ENOMEM;
	ap_msg.receive = zcrypt_pcica_receive;
	/* Unique program-supplied message id: pid + global sequence. */
	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
				atomic_inc_return(&zcrypt_step);
	ap_msg.private = &work;
	rc = ICAMEX_msg_to_type4MEX_msg(zdev, &ap_msg, mex);
	if (rc)
		goto out_free;
	init_completion(&work);
	ap_queue_message(zdev->ap_dev, &ap_msg);
	rc = wait_for_completion_interruptible(&work);
	if (rc == 0)
		rc = convert_response(zdev, &ap_msg, mex->outputdata,
				      mex->outputdatalength);
	else
		/* Signal pending. */
		ap_cancel_message(zdev->ap_dev, &ap_msg);
out_free:
	kfree(ap_msg.message);
	return rc;
}
315
/**
 * The request distributor calls this function if it picked the PCICA
 * device to handle a modexpo_crt request.
 * @zdev: pointer to zcrypt_device structure that identifies the
 *	  PCICA device to the request distributor
 * @crt: pointer to the modexpoc_crt request buffer
 *
 * Same flow as zcrypt_pcica_modexpo(), but converts the request into
 * the type4 CRT format. Returns 0 on success, -ENOMEM/-EFAULT on
 * setup failure, the result of convert_response(), or a negative
 * value when interrupted by a signal.
 */
static long zcrypt_pcica_modexpo_crt(struct zcrypt_device *zdev,
				     struct ica_rsa_modexpo_crt *crt)
{
	struct ap_message ap_msg;
	struct completion work;
	int rc;

	ap_init_message(&ap_msg);
	ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
	if (!ap_msg.message)
		return -ENOMEM;
	ap_msg.receive = zcrypt_pcica_receive;
	/* Unique program-supplied message id: pid + global sequence. */
	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
				atomic_inc_return(&zcrypt_step);
	ap_msg.private = &work;
	rc = ICACRT_msg_to_type4CRT_msg(zdev, &ap_msg, crt);
	if (rc)
		goto out_free;
	init_completion(&work);
	ap_queue_message(zdev->ap_dev, &ap_msg);
	rc = wait_for_completion_interruptible(&work);
	if (rc == 0)
		rc = convert_response(zdev, &ap_msg, crt->outputdata,
				      crt->outputdatalength);
	else
		/* Signal pending. */
		ap_cancel_message(zdev->ap_dev, &ap_msg);
out_free:
	kfree(ap_msg.message);
	return rc;
}
354
355/**
356 * The crypto operations for a PCICA card.
357 */
358static struct zcrypt_ops zcrypt_pcica_ops = {
359 .rsa_modexpo = zcrypt_pcica_modexpo,
360 .rsa_modexpo_crt = zcrypt_pcica_modexpo_crt,
361};
362
/**
 * Probe function for PCICA cards. It always accepts the AP device
 * since the bus_match already checked the hardware type.
 * @ap_dev: pointer to the AP device.
 *
 * Allocates a zcrypt_device, fills in the PCICA limits and crypto
 * operations, links it to the AP device and registers it with the
 * zcrypt API. Returns 0 on success, -ENOMEM if allocation fails, or
 * the error from zcrypt_device_register() (device freed again).
 */
static int zcrypt_pcica_probe(struct ap_device *ap_dev)
{
	struct zcrypt_device *zdev;
	int rc;

	zdev = zcrypt_device_alloc(PCICA_MAX_RESPONSE_SIZE);
	if (!zdev)
		return -ENOMEM;
	zdev->ap_dev = ap_dev;
	zdev->ops = &zcrypt_pcica_ops;
	zdev->online = 1;
	zdev->user_space_type = ZCRYPT_PCICA;
	zdev->type_string = "PCICA";
	zdev->min_mod_size = PCICA_MIN_MOD_SIZE;	/* 8 bits */
	zdev->max_mod_size = PCICA_MAX_MOD_SIZE;	/* 2048 bits */
	zdev->speed_rating = PCICA_SPEED_RATING;
	zdev->max_exp_bit_length = PCICA_MAX_MOD_SIZE;
	ap_dev->reply = &zdev->reply;
	ap_dev->private = zdev;
	rc = zcrypt_device_register(zdev);
	if (rc)
		goto out_free;
	return 0;

out_free:
	ap_dev->private = NULL;
	zcrypt_device_free(zdev);
	return rc;
}
397
398/**
399 * This is called to remove the extended PCICA driver information
400 * if an AP device is removed.
401 */
402static void zcrypt_pcica_remove(struct ap_device *ap_dev)
403{
404 struct zcrypt_device *zdev = ap_dev->private;
405
406 zcrypt_device_unregister(zdev);
407}
408
/* Module init: register the PCICA driver with the AP bus. */
int __init zcrypt_pcica_init(void)
{
	return ap_driver_register(&zcrypt_pcica_driver, THIS_MODULE, "pcica");
}
413
/* Module exit: unregister the PCICA driver from the AP bus. */
void zcrypt_pcica_exit(void)
{
	ap_driver_unregister(&zcrypt_pcica_driver);
}

module_init(zcrypt_pcica_init);
module_exit(zcrypt_pcica_exit);
diff --git a/drivers/s390/crypto/zcrypt_pcica.h b/drivers/s390/crypto/zcrypt_pcica.h
deleted file mode 100644
index 9a59155cad51..000000000000
--- a/drivers/s390/crypto/zcrypt_pcica.h
+++ /dev/null
@@ -1,115 +0,0 @@
1/*
2 * zcrypt 2.1.0
3 *
4 * Copyright IBM Corp. 2001, 2006
5 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com)
7 *
8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#ifndef _ZCRYPT_PCICA_H_
27#define _ZCRYPT_PCICA_H_
28
29/**
30 * The type 4 message family is associated with a PCICA card.
31 *
32 * The four members of the family are described below.
33 *
34 * Note that all unsigned char arrays are right-justified and left-padded
35 * with zeroes.
36 *
37 * Note that all reserved fields must be zeroes.
38 */
39struct type4_hdr {
40 unsigned char reserved1;
41 unsigned char msg_type_code; /* 0x04 */
42 unsigned short msg_len;
43 unsigned char request_code; /* 0x40 */
44 unsigned char msg_fmt;
45 unsigned short reserved2;
46} __attribute__((packed));
47
48#define TYPE4_TYPE_CODE 0x04
49#define TYPE4_REQU_CODE 0x40
50
51#define TYPE4_SME_FMT 0x00
52#define TYPE4_LME_FMT 0x10
53#define TYPE4_SCR_FMT 0x40
54#define TYPE4_LCR_FMT 0x50
55
56/* Mod-Exp, with a small modulus */
57struct type4_sme {
58 struct type4_hdr header;
59 unsigned char message[128];
60 unsigned char exponent[128];
61 unsigned char modulus[128];
62} __attribute__((packed));
63
64/* Mod-Exp, with a large modulus */
65struct type4_lme {
66 struct type4_hdr header;
67 unsigned char message[256];
68 unsigned char exponent[256];
69 unsigned char modulus[256];
70} __attribute__((packed));
71
72/* CRT, with a small modulus */
73struct type4_scr {
74 struct type4_hdr header;
75 unsigned char message[128];
76 unsigned char dp[72];
77 unsigned char dq[64];
78 unsigned char p[72];
79 unsigned char q[64];
80 unsigned char u[72];
81} __attribute__((packed));
82
83/* CRT, with a large modulus */
84struct type4_lcr {
85 struct type4_hdr header;
86 unsigned char message[256];
87 unsigned char dp[136];
88 unsigned char dq[128];
89 unsigned char p[136];
90 unsigned char q[128];
91 unsigned char u[136];
92} __attribute__((packed));
93
94/**
95 * The type 84 response family is associated with a PCICA card.
96 *
97 * Note that all unsigned char arrays are right-justified and left-padded
98 * with zeroes.
99 *
100 * Note that all reserved fields must be zeroes.
101 */
102
103struct type84_hdr {
104 unsigned char reserved1;
105 unsigned char code;
106 unsigned short len;
107 unsigned char reserved2[4];
108} __attribute__((packed));
109
110#define TYPE84_RSP_CODE 0x84
111
112int zcrypt_pcica_init(void);
113void zcrypt_pcica_exit(void);
114
115#endif /* _ZCRYPT_PCICA_H_ */
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
deleted file mode 100644
index 9f18876f058b..000000000000
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ /dev/null
@@ -1,627 +0,0 @@
1/*
2 * zcrypt 2.1.0
3 *
4 * Copyright IBM Corp. 2001, 2006
5 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com)
7 *
8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * Ralph Wuerthner <rwuerthn@de.ibm.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#define KMSG_COMPONENT "zcrypt"
28#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
29
30#include <linux/module.h>
31#include <linux/init.h>
32#include <linux/gfp.h>
33#include <linux/err.h>
34#include <linux/atomic.h>
35#include <asm/uaccess.h>
36
37#include "ap_bus.h"
38#include "zcrypt_api.h"
39#include "zcrypt_error.h"
40#include "zcrypt_pcicc.h"
41#include "zcrypt_cca_key.h"
42
43#define PCICC_MIN_MOD_SIZE 64 /* 512 bits */
44#define PCICC_MAX_MOD_SIZE_OLD 128 /* 1024 bits */
45#define PCICC_MAX_MOD_SIZE 256 /* 2048 bits */
46
47/*
48 * PCICC cards need a speed rating of 0. This keeps them at the end of
49 * the zcrypt device list (see zcrypt_api.c). PCICC cards are only
50 * used if no other cards are present because they are slow and can only
51 * cope with PKCS12 padded requests. The logic is queer. PKCS11 padded
52 * requests are rejected. The modexpo function encrypts PKCS12 padded data
53 * and decrypts any non-PKCS12 padded data (except PKCS11) in the assumption
54 * that it's encrypted PKCS12 data. The modexpo_crt function always decrypts
55 * the data in the assumption that its PKCS12 encrypted data.
56 */
57#define PCICC_SPEED_RATING 0
58
59#define PCICC_MAX_MESSAGE_SIZE 0x710 /* max size type6 v1 crt message */
60#define PCICC_MAX_RESPONSE_SIZE 0x710 /* max size type86 v1 reply */
61
62#define PCICC_CLEANUP_TIME (15*HZ)
63
64static struct ap_device_id zcrypt_pcicc_ids[] = {
65 { AP_DEVICE(AP_DEVICE_TYPE_PCICC) },
66 { /* end of list */ },
67};
68
69MODULE_DEVICE_TABLE(ap, zcrypt_pcicc_ids);
70MODULE_AUTHOR("IBM Corporation");
71MODULE_DESCRIPTION("PCICC Cryptographic Coprocessor device driver, "
72 "Copyright IBM Corp. 2001, 2006");
73MODULE_LICENSE("GPL");
74
75static int zcrypt_pcicc_probe(struct ap_device *ap_dev);
76static void zcrypt_pcicc_remove(struct ap_device *ap_dev);
77static void zcrypt_pcicc_receive(struct ap_device *, struct ap_message *,
78 struct ap_message *);
79
80static struct ap_driver zcrypt_pcicc_driver = {
81 .probe = zcrypt_pcicc_probe,
82 .remove = zcrypt_pcicc_remove,
83 .ids = zcrypt_pcicc_ids,
84 .request_timeout = PCICC_CLEANUP_TIME,
85};
86
87/**
88 * The following is used to initialize the CPRB passed to the PCICC card
89 * in a type6 message. The 3 fields that must be filled in at execution
90 * time are req_parml, rpl_parml and usage_domain. Note that all three
91 * fields are *little*-endian. Actually, everything about this interface
92 * is ascii/little-endian, since the device has 'Intel inside'.
93 *
94 * The CPRB is followed immediately by the parm block.
95 * The parm block contains:
96 * - function code ('PD' 0x5044 or 'PK' 0x504B)
97 * - rule block (0x0A00 'PKCS-1.2' or 0x0A00 'ZERO-PAD')
98 * - VUD block
99 */
100static struct CPRB static_cprb = {
101 .cprb_len = cpu_to_le16(0x0070),
102 .cprb_ver_id = 0x41,
103 .func_id = {0x54,0x32},
104 .checkpoint_flag= 0x01,
105 .svr_namel = cpu_to_le16(0x0008),
106 .svr_name = {'I','C','S','F',' ',' ',' ',' '}
107};
108
/**
 * Check whether the buffer starts with PKCS#1 block-type-01 padding
 * (0x00 0x01, at least eight 0xFF bytes, then a 0x00 separator
 * followed by data). Returns 1 if so, 0 otherwise.
 */
static inline int is_PKCS11_padded(unsigned char *buffer, int length)
{
	int idx;

	if (buffer[0] != 0x00 || buffer[1] != 0x01)
		return 0;
	idx = 2;
	while (idx < length && buffer[idx] == 0xFF)
		idx++;
	/* Require >= 8 pad bytes and a terminator before the end. */
	if (idx < 10 || idx == length)
		return 0;
	return buffer[idx] == 0x00;
}
126
/**
 * Check whether the buffer starts with PKCS#1 block-type-02 padding
 * (0x00 0x02, at least eight non-zero pad bytes, then a 0x00
 * separator followed by data). Returns 1 if so, 0 otherwise.
 */
static inline int is_PKCS12_padded(unsigned char *buffer, int length)
{
	int idx;

	if (buffer[0] != 0x00 || buffer[1] != 0x02)
		return 0;
	for (idx = 2; idx < length; idx++)
		if (buffer[idx] == 0x00)
			break;
	/* Require >= 8 pad bytes and a separator before the end. */
	if (idx < 10 || idx == length)
		return 0;
	return buffer[idx] == 0x00;
}
144
/**
 * Convert a ICAMEX message to a type6 MEX message.
 *
 * Builds a type6 request (header + CPRB + function/rules block + VUD)
 * in ap_msg->message. PKCS11-padded input is rejected. PKCS12-padded
 * input is stripped of its padding and sent as a 'PK' (encipher)
 * request with the public key appended; everything else is sent as a
 * 'PD' (decipher) request in the assumption that it is encrypted
 * PKCS12 data (see the PCICC_SPEED_RATING comment above).
 *
 * @zdev: crypto device pointer (supplies the usage domain)
 * @ap_msg: AP message; receives the request and its padded length
 * @mex: pointer to user input data
 *
 * Returns 0 on success, -EFAULT on copy failure, -EINVAL for PKCS11
 * input, -ENODEV for implausible PKCS12 padding, or a negative key
 * setup error.
 */
static int ICAMEX_msg_to_type6MEX_msg(struct zcrypt_device *zdev,
				      struct ap_message *ap_msg,
				      struct ica_rsa_modexpo *mex)
{
	static struct type6_hdr static_type6_hdr = {
		.type = 0x06,
		.offset1 = 0x00000058,
		.agent_id = {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
			     0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
		.function_code = {'P','K'},
	};
	static struct function_and_rules_block static_pke_function_and_rules = {
		.function_code = {'P','K'},
		.ulen = cpu_to_le16(10),
		.only_rule = {'P','K','C','S','-','1','.','2'}
	};
	/* On-the-wire layout of the request being assembled. */
	struct {
		struct type6_hdr hdr;
		struct CPRB cprb;
		struct function_and_rules_block fr;
		unsigned short length;
		char text[0];
	} __attribute__((packed)) *msg = ap_msg->message;
	int vud_len, pad_len, size;

	/* VUD.ciphertext */
	if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
		return -EFAULT;

	if (is_PKCS11_padded(msg->text, mex->inputdatalength))
		return -EINVAL;

	/* static message header and f&r */
	msg->hdr = static_type6_hdr;
	msg->fr = static_pke_function_and_rules;

	if (is_PKCS12_padded(msg->text, mex->inputdatalength)) {
		/* strip the padding and adjust the data length */
		pad_len = strnlen(msg->text + 2, mex->inputdatalength - 2) + 3;
		if (pad_len <= 9 || pad_len >= mex->inputdatalength)
			return -ENODEV;
		vud_len = mex->inputdatalength - pad_len;
		memmove(msg->text, msg->text + pad_len, vud_len);
		msg->length = cpu_to_le16(vud_len + 2);

		/* Set up key after the variable length text. */
		size = zcrypt_type6_mex_key_en(mex, msg->text + vud_len, 0);
		if (size < 0)
			return size;
		size += sizeof(*msg) + vud_len;	/* total size of msg */
	} else {
		vud_len = mex->inputdatalength;
		msg->length = cpu_to_le16(2 + vud_len);

		/* Not PKCS12 padded: switch to a decipher ('PD') request. */
		msg->hdr.function_code[1] = 'D';
		msg->fr.function_code[1] = 'D';

		/* Set up key after the variable length text. */
		size = zcrypt_type6_mex_key_de(mex, msg->text + vud_len, 0);
		if (size < 0)
			return size;
		size += sizeof(*msg) + vud_len;	/* total size of msg */
	}

	/* message header, cprb and f&r */
	msg->hdr.ToCardLen1 = (size - sizeof(msg->hdr) + 3) & -4;
	msg->hdr.FromCardLen1 = PCICC_MAX_RESPONSE_SIZE - sizeof(msg->hdr);

	msg->cprb = static_cprb;
	msg->cprb.usage_domain[0] = AP_QID_QUEUE(zdev->ap_dev->qid);
	msg->cprb.req_parml = cpu_to_le16(size - sizeof(msg->hdr) -
					  sizeof(msg->cprb));
	msg->cprb.rpl_parml = cpu_to_le16(msg->hdr.FromCardLen1);

	/* Round the total length up to a word boundary. */
	ap_msg->length = (size + 3) & -4;
	return 0;
}
231
/**
 * Convert a ICACRT message to a type6 CRT message.
 *
 * Builds a type6 'PD' (decipher) request in ap_msg->message: header,
 * CPRB, PKCS-1.2 function/rules block, the ciphertext VUD and the
 * CRT key appended after the variable-length text. PKCS11-padded
 * input is rejected; everything else is decrypted in the assumption
 * that it is PKCS12-encrypted data (see the comment at
 * PCICC_SPEED_RATING above).
 *
 * @zdev: crypto device pointer (supplies the usage domain)
 * @ap_msg: AP message; receives the request and its padded length
 * @crt: pointer to user input data
 *
 * Returns 0 on success, -EFAULT on copy failure, -EINVAL for PKCS11
 * input, or a negative key setup error.
 */
static int ICACRT_msg_to_type6CRT_msg(struct zcrypt_device *zdev,
				      struct ap_message *ap_msg,
				      struct ica_rsa_modexpo_crt *crt)
{
	static struct type6_hdr static_type6_hdr = {
		.type = 0x06,
		.offset1 = 0x00000058,
		.agent_id = {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
			     0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
		.function_code = {'P','D'},
	};
	static struct function_and_rules_block static_pkd_function_and_rules = {
		.function_code = {'P','D'},
		.ulen = cpu_to_le16(10),
		.only_rule = {'P','K','C','S','-','1','.','2'}
	};
	/* On-the-wire layout of the request being assembled. */
	struct {
		struct type6_hdr hdr;
		struct CPRB cprb;
		struct function_and_rules_block fr;
		unsigned short length;
		char text[0];
	} __attribute__((packed)) *msg = ap_msg->message;
	int size;

	/* VUD.ciphertext */
	msg->length = cpu_to_le16(2 + crt->inputdatalength);
	if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
		return -EFAULT;

	if (is_PKCS11_padded(msg->text, crt->inputdatalength))
		return -EINVAL;

	/* Set up key after the variable length text. */
	size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 0);
	if (size < 0)
		return size;
	size += sizeof(*msg) + crt->inputdatalength;	/* total size of msg */

	/* message header, cprb and f&r */
	msg->hdr = static_type6_hdr;
	msg->hdr.ToCardLen1 = (size - sizeof(msg->hdr) + 3) & -4;
	msg->hdr.FromCardLen1 = PCICC_MAX_RESPONSE_SIZE - sizeof(msg->hdr);

	msg->cprb = static_cprb;
	msg->cprb.usage_domain[0] = AP_QID_QUEUE(zdev->ap_dev->qid);
	msg->cprb.req_parml = msg->cprb.rpl_parml =
		cpu_to_le16(size - sizeof(msg->hdr) - sizeof(msg->cprb));

	msg->fr = static_pkd_function_and_rules;

	/* Round the total length up to a word boundary. */
	ap_msg->length = (size + 3) & -4;
	return 0;
}
295
/**
 * Copy results from a type 86 reply message back to user space.
 *
 * @zdev: crypto device pointer
 * @reply: reply AP message.
 * @data: pointer to user output data
 * @length: size of user output data
 *
 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
 */
/* Layout of a type86 (CPRB version 1) reply as returned by the card. */
struct type86_reply {
	struct type86_hdr hdr;
	struct type86_fmt2_ext fmt2;
	struct CPRB cprb;
	unsigned char pad[4];	/* 4 byte function code/rules block ? */
	unsigned short length;	/* little-endian; includes these 2 bytes */
	char text[0];		/* variable-length reply data */
} __attribute__((packed));
314
/**
 * Copy the plaintext/ciphertext from a type86 reply to user space,
 * re-applying PKCS12 padding that the card stripped during decipher.
 *
 * Non-zero CPRB return codes are mapped to errnos; rc 8/rs 770 and
 * 8/783 additionally shrink the device's advertised modulus size and
 * retry, any unrecognized code takes the device offline.
 *
 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN.
 */
static int convert_type86(struct zcrypt_device *zdev,
			  struct ap_message *reply,
			  char __user *outputdata,
			  unsigned int outputdatalength)
{
	/* Fixed pseudo-random pad bytes used to 'restore' the PKCS12
	 * padding that the card removed (a trailing 0x00 is added). */
	static unsigned char static_pad[] = {
		0x00,0x02,
		0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,
		0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
		0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,
		0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
		0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,
		0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
		0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,
		0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
		0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,
		0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
		0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,
		0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
		0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,
		0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
		0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,
		0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
		0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,
		0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
		0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,
		0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
		0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,
		0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
		0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,
		0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
		0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,
		0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
		0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,
		0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
		0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,
		0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
		0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,
		0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
	};
	struct type86_reply *msg = reply->message;
	unsigned short service_rc, service_rs;
	unsigned int reply_len, pad_len;
	char *data;

	service_rc = le16_to_cpu(msg->cprb.ccp_rtcode);
	if (unlikely(service_rc != 0)) {
		service_rs = le16_to_cpu(msg->cprb.ccp_rscode);
		if (service_rc == 8 && service_rs == 66)
			return -EINVAL;
		if (service_rc == 8 && service_rs == 65)
			return -EINVAL;
		if (service_rc == 8 && service_rs == 770) {
			/* Card cannot handle this modulus size: shrink
			 * the limit and retry on (possibly) another card. */
			zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
			return -EAGAIN;
		}
		if (service_rc == 8 && service_rs == 783) {
			zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
			return -EAGAIN;
		}
		if (service_rc == 8 && service_rs == 72)
			return -EINVAL;
		zdev->online = 0;
		pr_err("Cryptographic device %x failed and was set offline\n",
		       zdev->ap_dev->qid);
		ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
			       zdev->ap_dev->qid, zdev->online,
			       msg->hdr.reply_code);
		return -EAGAIN;	/* repeat the request on a different device. */
	}
	data = msg->text;
	reply_len = le16_to_cpu(msg->length) - 2;
	if (reply_len > outputdatalength)
		return -EINVAL;
	/*
	 * For all encipher requests, the length of the ciphertext (reply_len)
	 * will always equal the modulus length. For MEX decipher requests
	 * the output needs to get padded. Minimum pad size is 10.
	 *
	 * Currently, the cases where padding will be added is for:
	 * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
	 *   ZERO-PAD and CRT is only supported for PKD requests)
	 * - PCICC, always
	 */
	pad_len = outputdatalength - reply_len;
	if (pad_len > 0) {
		if (pad_len < 10)
			return -EINVAL;
		/* 'restore' padding left in the PCICC/PCIXCC card. */
		if (copy_to_user(outputdata, static_pad, pad_len - 1))
			return -EFAULT;
		if (put_user(0, outputdata + pad_len - 1))
			return -EFAULT;
	}
	/* Copy the crypto response to user space. */
	if (copy_to_user(outputdata + pad_len, data, reply_len))
		return -EFAULT;
	return 0;
}
414
/**
 * Dispatch a PCICC reply by its type byte.
 *
 * Type 82/88 replies and type 86 replies with a non-zero reply code
 * are error messages handled by convert_error(); a clean type 86
 * reply with CPRB version 1 carries the result. Anything else marks
 * the device offline and requests a retry on another device.
 *
 * Returns 0 on success or a negative errno.
 */
static int convert_response(struct zcrypt_device *zdev,
			    struct ap_message *reply,
			    char __user *outputdata,
			    unsigned int outputdatalength)
{
	struct type86_reply *msg = reply->message;

	/* Response type byte is the second byte in the response. */
	switch (msg->hdr.type) {
	case TYPE82_RSP_CODE:
	case TYPE88_RSP_CODE:
		return convert_error(zdev, reply);
	case TYPE86_RSP_CODE:
		if (msg->hdr.reply_code)
			return convert_error(zdev, reply);
		if (msg->cprb.cprb_ver_id == 0x01)
			return convert_type86(zdev, reply,
					      outputdata, outputdatalength);
		/* no break, incorrect cprb version is an unknown response */
	default: /* Unknown response type, this should NEVER EVER happen */
		zdev->online = 0;
		pr_err("Cryptographic device %x failed and was set offline\n",
		       zdev->ap_dev->qid);
		ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
			       zdev->ap_dev->qid, zdev->online);
		return -EAGAIN;	/* repeat the request on a different device. */
	}
}
443
444/**
445 * This function is called from the AP bus code after a crypto request
446 * "msg" has finished with the reply message "reply".
447 * It is called from tasklet context.
 * It copies the (possibly truncated) reply into the request's message
 * buffer and then completes the submitter's completion.
448 * @ap_dev: pointer to the AP device
449 * @msg: pointer to the AP message
450 * @reply: pointer to the AP reply message
451 */
452static void zcrypt_pcicc_receive(struct ap_device *ap_dev,
453 struct ap_message *msg,
454 struct ap_message *reply)
455{
456 static struct error_hdr error_reply = {
457 .type = TYPE82_RSP_CODE,
458 .reply_code = REP82_ERROR_MACHINE_FAILURE,
459 };
460 struct type86_reply *t86r;
461 int length;
462
463 /* Copy the reply message to the request message buffer. */
 /* Transport layer failure: synthesize a TYPE82 machine-failure reply. */
464 if (IS_ERR(reply)) {
465 memcpy(msg->message, &error_reply, sizeof(error_reply));
466 goto out;
467 }
468 t86r = reply->message;
469 if (t86r->hdr.type == TYPE86_RSP_CODE &&
470 t86r->cprb.cprb_ver_id == 0x01) {
 /* Header plus payload; the -2 mirrors the adjustment in
  * convert_type86() — presumably t86r->length includes its own
  * two length bytes (TODO confirm against the CCA reply format). */
471 length = sizeof(struct type86_reply) + t86r->length - 2;
 /* Never copy more than the preallocated response buffer holds. */
472 length = min(PCICC_MAX_RESPONSE_SIZE, length);
473 memcpy(msg->message, reply->message, length);
474 } else
 /* Error reply: the error header alone suffices for conversion. */
475 memcpy(msg->message, reply->message, sizeof error_reply);
476out:
 /* Wake the task waiting in zcrypt_pcicc_modexpo{,_crt}(). */
477 complete((struct completion *) msg->private);
478}
479
/* Per-request sequence counter; forms the low 32 bits of each psmid. */
480static atomic_t zcrypt_step = ATOMIC_INIT(0);
481
482/**
483 * The request distributor calls this function if it picked the PCICC
484 * device to handle a modexpo request.
485 * @zdev: pointer to zcrypt_device structure that identifies the
486 * PCICC device to the request distributor
487 * @mex: pointer to the modexpo request buffer
 *
 * Returns 0 on success, -ENOMEM if no message page is available,
 * a negative errno from request conversion or response handling,
 * or -ERESTARTSYS style codes when interrupted by a signal.
488 */
489static long zcrypt_pcicc_modexpo(struct zcrypt_device *zdev,
490 struct ica_rsa_modexpo *mex)
491{
492 struct ap_message ap_msg;
493 struct completion work;
494 int rc;
495
496 ap_init_message(&ap_msg);
 /* One zeroed page serves as both request and reply buffer. */
497 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
498 if (!ap_msg.message)
499 return -ENOMEM;
500 ap_msg.receive = zcrypt_pcicc_receive;
501 ap_msg.length = PAGE_SIZE;
 /* psmid: caller's pid in the upper 32 bits, sequence count below. */
502 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
503 atomic_inc_return(&zcrypt_step);
504 ap_msg.private = &work;
505 rc = ICAMEX_msg_to_type6MEX_msg(zdev, &ap_msg, mex);
506 if (rc)
507 goto out_free;
 /* Completion must be ready before the message can be answered. */
508 init_completion(&work);
509 ap_queue_message(zdev->ap_dev, &ap_msg);
510 rc = wait_for_completion_interruptible(&work);
511 if (rc == 0)
512 rc = convert_response(zdev, &ap_msg, mex->outputdata,
513 mex->outputdatalength);
514 else
515 /* Signal pending. */
516 ap_cancel_message(zdev->ap_dev, &ap_msg);
517out_free:
518 free_page((unsigned long) ap_msg.message);
519 return rc;
520}
521
522/**
523 * The request distributor calls this function if it picked the PCICC
524 * device to handle a modexpo_crt request.
525 * @zdev: pointer to zcrypt_device structure that identifies the
526 * PCICC device to the request distributor
527 * @crt: pointer to the modexpoc_crt request buffer
 *
 * Same flow as zcrypt_pcicc_modexpo(), but the request is converted
 * with the CRT (Chinese Remainder Theorem) message builder.
 * Returns 0 on success or a negative errno.
528 */
529static long zcrypt_pcicc_modexpo_crt(struct zcrypt_device *zdev,
530 struct ica_rsa_modexpo_crt *crt)
531{
532 struct ap_message ap_msg;
533 struct completion work;
534 int rc;
535
536 ap_init_message(&ap_msg);
 /* One zeroed page serves as both request and reply buffer. */
537 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
538 if (!ap_msg.message)
539 return -ENOMEM;
540 ap_msg.receive = zcrypt_pcicc_receive;
541 ap_msg.length = PAGE_SIZE;
 /* psmid: caller's pid in the upper 32 bits, sequence count below. */
542 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
543 atomic_inc_return(&zcrypt_step);
544 ap_msg.private = &work;
545 rc = ICACRT_msg_to_type6CRT_msg(zdev, &ap_msg, crt);
546 if (rc)
547 goto out_free;
 /* Completion must be ready before the message can be answered. */
548 init_completion(&work);
549 ap_queue_message(zdev->ap_dev, &ap_msg);
550 rc = wait_for_completion_interruptible(&work);
551 if (rc == 0)
552 rc = convert_response(zdev, &ap_msg, crt->outputdata,
553 crt->outputdatalength);
554 else
555 /* Signal pending. */
556 ap_cancel_message(zdev->ap_dev, &ap_msg);
557out_free:
558 free_page((unsigned long) ap_msg.message);
559 return rc;
560}
561
562/**
563 * The crypto operations for a PCICC card.
 * Only plain RSA mod-expo and CRT mod-expo are wired up; all other
 * zcrypt_ops callbacks stay NULL via C aggregate initialization.
564 */
565static struct zcrypt_ops zcrypt_pcicc_ops = {
566 .rsa_modexpo = zcrypt_pcicc_modexpo,
567 .rsa_modexpo_crt = zcrypt_pcicc_modexpo_crt,
568};
569
570/**
571 * Probe function for PCICC cards. It always accepts the AP device
572 * since the bus_match already checked the hardware type.
573 * @ap_dev: pointer to the AP device.
 *
 * Allocates and registers a zcrypt device for @ap_dev.
 * Returns 0 on success, -ENOMEM if allocation fails, or the errno
 * from zcrypt_device_register().
574 */
575static int zcrypt_pcicc_probe(struct ap_device *ap_dev)
576{
577 struct zcrypt_device *zdev;
578 int rc;
579
580 zdev = zcrypt_device_alloc(PCICC_MAX_RESPONSE_SIZE);
581 if (!zdev)
582 return -ENOMEM;
 /* Fill in the PCICC capabilities and link the two device structs. */
583 zdev->ap_dev = ap_dev;
584 zdev->ops = &zcrypt_pcicc_ops;
585 zdev->online = 1;
586 zdev->user_space_type = ZCRYPT_PCICC;
587 zdev->type_string = "PCICC";
588 zdev->min_mod_size = PCICC_MIN_MOD_SIZE;
589 zdev->max_mod_size = PCICC_MAX_MOD_SIZE;
590 zdev->speed_rating = PCICC_SPEED_RATING;
591 zdev->max_exp_bit_length = PCICC_MAX_MOD_SIZE;
592 ap_dev->reply = &zdev->reply;
593 ap_dev->private = zdev;
594 rc = zcrypt_device_register(zdev);
595 if (rc)
596 goto out_free;
597 return 0;
598
 /* Registration failed: unlink from the AP device and free. */
599 out_free:
600 ap_dev->private = NULL;
601 zcrypt_device_free(zdev);
602 return rc;
603}
604
605/**
606 * This is called to remove the extended PCICC driver information
607 * if an AP device is removed.
 * @ap_dev: AP device being removed; its ->private holds the
 * zcrypt_device set up by zcrypt_pcicc_probe().
608 */
609static void zcrypt_pcicc_remove(struct ap_device *ap_dev)
610{
611 struct zcrypt_device *zdev = ap_dev->private;
612
 /* Unregister drops the reference taken at registration time. */
613 zcrypt_device_unregister(zdev);
614}
615
/* Register the PCICC driver with the AP bus under the name "pcicc". */
616int __init zcrypt_pcicc_init(void)
617{
618 return ap_driver_register(&zcrypt_pcicc_driver, THIS_MODULE, "pcicc");
619}
620
/* Unregister the PCICC driver from the AP bus on module unload. */
621void zcrypt_pcicc_exit(void)
622{
623 ap_driver_unregister(&zcrypt_pcicc_driver);
624}
625
/* Hook the driver into the module load/unload sequence. */
626module_init(zcrypt_pcicc_init);
627module_exit(zcrypt_pcicc_exit);
diff --git a/drivers/s390/crypto/zcrypt_pcicc.h b/drivers/s390/crypto/zcrypt_pcicc.h
deleted file mode 100644
index 7fe27e15075b..000000000000
--- a/drivers/s390/crypto/zcrypt_pcicc.h
+++ /dev/null
@@ -1,174 +0,0 @@
1/*
2 * zcrypt 2.1.0
3 *
4 * Copyright IBM Corp. 2001, 2006
5 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com)
7 *
8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#ifndef _ZCRYPT_PCICC_H_
27#define _ZCRYPT_PCICC_H_
28
29/**
30 * The type 6 message family is associated with PCICC or PCIXCC cards.
31 *
32 * It contains a message header followed by a CPRB, both of which
33 * are described below.
34 *
35 * Note that all reserved fields must be zeroes.
 *
 * The struct is packed so field offsets match the on-the-wire
 * message layout exactly (no compiler-inserted padding).
36 */
37struct type6_hdr {
38 unsigned char reserved1; /* 0x00 */
39 unsigned char type; /* 0x06 */
40 unsigned char reserved2[2]; /* 0x0000 */
41 unsigned char right[4]; /* 0x00000000 */
42 unsigned char reserved3[2]; /* 0x0000 */
43 unsigned char reserved4[2]; /* 0x0000 */
44 unsigned char apfs[4]; /* 0x00000000 */
45 unsigned int offset1; /* 0x00000058 (offset to CPRB) */
46 unsigned int offset2; /* 0x00000000 */
47 unsigned int offset3; /* 0x00000000 */
48 unsigned int offset4; /* 0x00000000 */
49 unsigned char agent_id[16]; /* PCICC: */
50 /* 0x0100 */
51 /* 0x4343412d4150504c202020 */
52 /* 0x010101 */
53 /* PCIXCC: */
54 /* 0x4341000000000000 */
55 /* 0x0000000000000000 */
56 unsigned char rqid[2]; /* rqid. internal to 603 */
57 unsigned char reserved5[2]; /* 0x0000 */
58 unsigned char function_code[2]; /* for PKD, 0x5044 (ascii 'PD') */
59 unsigned char reserved6[2]; /* 0x0000 */
60 unsigned int ToCardLen1; /* (request CPRB len + 3) & -4 */
61 unsigned int ToCardLen2; /* db len 0x00000000 for PKD */
62 unsigned int ToCardLen3; /* 0x00000000 */
63 unsigned int ToCardLen4; /* 0x00000000 */
64 unsigned int FromCardLen1; /* response buffer length */
65 unsigned int FromCardLen2; /* db len 0x00000000 for PKD */
66 unsigned int FromCardLen3; /* 0x00000000 */
67 unsigned int FromCardLen4; /* 0x00000000 */
68} __attribute__((packed));
69
70/**
71 * CPRB
72 * Note that all shorts, ints and longs are little-endian.
73 * All pointer fields are 32-bits long, and mean nothing
74 *
75 * A request CPRB is followed by a request_parameter_block.
76 *
77 * The request (or reply) parameter block is organized thus:
78 * function code
79 * VUD block
80 * key block
 *
 * Packed: field offsets must match the on-the-wire CPRB layout.
 * Multi-byte fields declared as unsigned char arrays hold the
 * little-endian wire encoding and need explicit conversion on s390.
81 */
82struct CPRB {
83 unsigned short cprb_len; /* CPRB length */
84 unsigned char cprb_ver_id; /* CPRB version id. */
85 unsigned char pad_000; /* Alignment pad byte. */
86 unsigned char srpi_rtcode[4]; /* SRPI return code LELONG */
87 unsigned char srpi_verb; /* SRPI verb type */
88 unsigned char flags; /* flags */
89 unsigned char func_id[2]; /* function id */
90 unsigned char checkpoint_flag; /* */
91 unsigned char resv2; /* reserved */
92 unsigned short req_parml; /* request parameter buffer */
93 /* length 16-bit little endian */
94 unsigned char req_parmp[4]; /* request parameter buffer *
95 * pointer (means nothing: the *
96 * parameter buffer follows *
97 * the CPRB). */
98 unsigned char req_datal[4]; /* request data buffer */
99 /* length ULELONG */
100 unsigned char req_datap[4]; /* request data buffer */
101 /* pointer */
102 unsigned short rpl_parml; /* reply parameter buffer */
103 /* length 16-bit little endian */
104 unsigned char pad_001[2]; /* Alignment pad bytes. ULESHORT */
105 unsigned char rpl_parmp[4]; /* reply parameter buffer *
106 * pointer (means nothing: the *
107 * parameter buffer follows *
108 * the CPRB). */
109 unsigned char rpl_datal[4]; /* reply data buffer len ULELONG */
110 unsigned char rpl_datap[4]; /* reply data buffer */
111 /* pointer */
112 unsigned short ccp_rscode; /* server reason code ULESHORT */
113 unsigned short ccp_rtcode; /* server return code ULESHORT */
114 unsigned char repd_parml[2]; /* replied parameter len ULESHORT*/
115 unsigned char mac_data_len[2]; /* Mac Data Length ULESHORT */
116 unsigned char repd_datal[4]; /* replied data length ULELONG */
117 unsigned char req_pc[2]; /* PC identifier */
118 unsigned char res_origin[8]; /* resource origin */
119 unsigned char mac_value[8]; /* Mac Value */
120 unsigned char logon_id[8]; /* Logon Identifier */
121 unsigned char usage_domain[2]; /* cdx */
122 unsigned char resv3[18]; /* reserved for requestor */
123 unsigned short svr_namel; /* server name length ULESHORT */
124 unsigned char svr_name[8]; /* server name */
125} __attribute__((packed));
126
127/**
128 * The type 86 message family is associated with PCICC and PCIXCC cards.
129 *
130 * It contains a message header followed by a CPRB. The CPRB is
131 * the same as the request CPRB, which is described above.
132 *
133 * If format is 1, an error condition exists and no data beyond
134 * the 8-byte message header is of interest.
135 *
136 * The non-error message is shown below.
137 *
138 * Note that all reserved fields must be zeroes.
 *
 * Packed: offsets mirror the 8-byte wire header exactly.
139 */
140struct type86_hdr {
141 unsigned char reserved1; /* 0x00 */
142 unsigned char type; /* 0x86 */
143 unsigned char format; /* 0x01 (error) or 0x02 (ok) */
144 unsigned char reserved2; /* 0x00 */
145 unsigned char reply_code; /* reply code (see above) */
146 unsigned char reserved3[3]; /* 0x000000 */
147} __attribute__((packed));
148
149#define TYPE86_RSP_CODE 0x86
150#define TYPE86_FMT2 0x02
151
/*
 * Extension of the type 86 header present in format 2 (non-error)
 * replies; locates the CPRB within the reply. Packed wire layout.
 */
152struct type86_fmt2_ext {
153 unsigned char reserved[4]; /* 0x00000000 */
154 unsigned char apfs[4]; /* final status */
155 unsigned int count1; /* length of CPRB + parameters */
156 unsigned int offset1; /* offset to CPRB */
157 unsigned int count2; /* 0x00000000 */
158 unsigned int offset2; /* db offset 0x00000000 for PKD */
159 unsigned int count3; /* 0x00000000 */
160 unsigned int offset3; /* 0x00000000 */
161 unsigned int count4; /* 0x00000000 */
162 unsigned int offset4; /* 0x00000000 */
163} __attribute__((packed));
164
/*
 * Function code plus rule-array block of the request parameter block.
 * Packed wire layout; field semantics presumably follow the CCA
 * request format (function_code e.g. ascii 'PD' — confirm with spec).
 */
165struct function_and_rules_block {
166 unsigned char function_code[2];
167 unsigned short ulen;
168 unsigned char only_rule[8];
169} __attribute__((packed));
170
171int zcrypt_pcicc_init(void);
172void zcrypt_pcicc_exit(void);
173
174#endif /* _ZCRYPT_PCICC_H_ */
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index e9fae30fafda..b2a1a81e6fc8 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -28,6 +28,7 @@
28#include <linux/io.h> 28#include <linux/io.h>
29#include <linux/kvm_para.h> 29#include <linux/kvm_para.h>
30#include <linux/notifier.h> 30#include <linux/notifier.h>
31#include <asm/diag.h>
31#include <asm/setup.h> 32#include <asm/setup.h>
32#include <asm/irq.h> 33#include <asm/irq.h>
33#include <asm/cio.h> 34#include <asm/cio.h>
@@ -366,9 +367,9 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
366 kfree(thinint_area); 367 kfree(thinint_area);
367} 368}
368 369
369static inline long do_kvm_notify(struct subchannel_id schid, 370static inline long __do_kvm_notify(struct subchannel_id schid,
370 unsigned long queue_index, 371 unsigned long queue_index,
371 long cookie) 372 long cookie)
372{ 373{
373 register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY; 374 register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
374 register struct subchannel_id __schid asm("2") = schid; 375 register struct subchannel_id __schid asm("2") = schid;
@@ -383,6 +384,14 @@ static inline long do_kvm_notify(struct subchannel_id schid,
383 return __rc; 384 return __rc;
384} 385}
385 386
387static inline long do_kvm_notify(struct subchannel_id schid,
388 unsigned long queue_index,
389 long cookie)
390{
391 diag_stat_inc(DIAG_STAT_X500);
392 return __do_kvm_notify(schid, queue_index, cookie);
393}
394
386static bool virtio_ccw_kvm_notify(struct virtqueue *vq) 395static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
387{ 396{
388 struct virtio_ccw_vq_info *info = vq->priv; 397 struct virtio_ccw_vq_info *info = vq->priv;
diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c
index a9a5210143ae..3db9d0e0673d 100644
--- a/drivers/watchdog/diag288_wdt.c
+++ b/drivers/watchdog/diag288_wdt.c
@@ -29,6 +29,7 @@
29#include <linux/watchdog.h> 29#include <linux/watchdog.h>
30#include <linux/suspend.h> 30#include <linux/suspend.h>
31#include <asm/ebcdic.h> 31#include <asm/ebcdic.h>
32#include <asm/diag.h>
32#include <linux/io.h> 33#include <linux/io.h>
33#include <linux/uaccess.h> 34#include <linux/uaccess.h>
34 35
@@ -94,12 +95,14 @@ static int __diag288(unsigned int func, unsigned int timeout,
94static int __diag288_vm(unsigned int func, unsigned int timeout, 95static int __diag288_vm(unsigned int func, unsigned int timeout,
95 char *cmd, size_t len) 96 char *cmd, size_t len)
96{ 97{
98 diag_stat_inc(DIAG_STAT_X288);
97 return __diag288(func, timeout, virt_to_phys(cmd), len); 99 return __diag288(func, timeout, virt_to_phys(cmd), len);
98} 100}
99 101
100static int __diag288_lpar(unsigned int func, unsigned int timeout, 102static int __diag288_lpar(unsigned int func, unsigned int timeout,
101 unsigned long action) 103 unsigned long action)
102{ 104{
105 diag_stat_inc(DIAG_STAT_X288);
103 return __diag288(func, timeout, action, 0); 106 return __diag288(func, timeout, action, 0);
104} 107}
105 108
@@ -141,6 +144,7 @@ static int wdt_stop(struct watchdog_device *dev)
141{ 144{
142 int ret; 145 int ret;
143 146
147 diag_stat_inc(DIAG_STAT_X288);
144 ret = __diag288(WDT_FUNC_CANCEL, 0, 0, 0); 148 ret = __diag288(WDT_FUNC_CANCEL, 0, 0, 0);
145 return ret; 149 return ret;
146} 150}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index e2d46adb54b4..b029d426c558 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -754,7 +754,7 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
754 754
755 if (pte_present(ptent)) { 755 if (pte_present(ptent)) {
756 ptent = pte_wrprotect(ptent); 756 ptent = pte_wrprotect(ptent);
757 ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY); 757 ptent = pte_clear_soft_dirty(ptent);
758 } else if (is_swap_pte(ptent)) { 758 } else if (is_swap_pte(ptent)) {
759 ptent = pte_swp_clear_soft_dirty(ptent); 759 ptent = pte_swp_clear_soft_dirty(ptent);
760 } 760 }
@@ -768,7 +768,7 @@ static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
768 pmd_t pmd = *pmdp; 768 pmd_t pmd = *pmdp;
769 769
770 pmd = pmd_wrprotect(pmd); 770 pmd = pmd_wrprotect(pmd);
771 pmd = pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY); 771 pmd = pmd_clear_soft_dirty(pmd);
772 772
773 if (vma->vm_flags & VM_SOFTDIRTY) 773 if (vma->vm_flags & VM_SOFTDIRTY)
774 vma->vm_flags &= ~VM_SOFTDIRTY; 774 vma->vm_flags &= ~VM_SOFTDIRTY;
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 3eabbbbfd578..14b0ff32fb9f 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -505,6 +505,16 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
505 return pmd; 505 return pmd;
506} 506}
507 507
508static inline pte_t pte_clear_soft_dirty(pte_t pte)
509{
510 return pte;
511}
512
513static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
514{
515 return pmd;
516}
517
508static inline pte_t pte_swp_mksoft_dirty(pte_t pte) 518static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
509{ 519{
510 return pte; 520 return pte;
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index fe817432190c..52a459ff75f4 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -56,7 +56,7 @@ extern void __chk_io_ptr(const volatile void __iomem *);
56#include <linux/compiler-gcc.h> 56#include <linux/compiler-gcc.h>
57#endif 57#endif
58 58
59#ifdef CC_USING_HOTPATCH 59#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
60#define notrace __attribute__((hotpatch(0,0))) 60#define notrace __attribute__((hotpatch(0,0)))
61#else 61#else
62#define notrace __attribute__((no_instrument_function)) 62#define notrace __attribute__((no_instrument_function))