aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-12-16 12:05:25 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2016-12-16 12:05:25 -0500
commit57ca04ab440168e101da746ef9edd1ec583b7214 (patch)
tree759e32dd38e7c044f452db68e4f5b977f74ecfae
parent73e2e0c9b13c97df1c8565f6e158caac3c481b44 (diff)
parent0b7589ecca2b6f962cf3314a3a5a675deeefb624 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull more s390 updates from Martin Schwidefsky: "Over 95% of the changes in this pull request are related to the zcrypt driver. There are five improvements for zcrypt: the ID for the CEX6 cards is added, workload balancing and multi-domain support are introduced, the debug logs are overhauled and a set of tracepoints is added. Then there are several patches in regard to inline assemblies. One compile fix and several missing memory clobbers. As far as we can tell the omitted memory clobbers have not caused any breakage. A small change to the PCI arch code, the machine can tells us how big the function measurement blocks are. The PCI function measurement will be disabled for a device if the queried length is larger than the allocated size for these blocks. And two more patches to correct five printk messages. That is it for s390 in regard to the 4.10 merge window. Happy holidays" * 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (23 commits) s390/pci: query fmb length s390/zcrypt: add missing memory clobber to ap_qci inline assembly s390/extmem: add missing memory clobber to dcss_set_subcodes s390/nmi: fix inline assembly constraints s390/lib: add missing memory barriers to string inline assemblies s390/cpumf: fix qsi inline assembly s390/setup: reword printk messages s390/dasd: fix typos in DASD error messages s390: fix compile error with memmove_early() inline assembly s390/zcrypt: tracepoint definitions for zcrypt device driver. s390/zcrypt: Rework debug feature invocations. s390/zcrypt: Improved invalid domain response handling. s390/zcrypt: Fix ap_max_domain_id for older machine types s390/zcrypt: Correct function bits for CEX2x and CEX3x cards. s390/zcrypt: Fixed attrition of AP adapters and domains s390/zcrypt: Introduce new zcrypt device status API s390/zcrypt: add multi domain support s390/zcrypt: Introduce workload balancing s390/zcrypt: get rid of ap_poll_requests s390/zcrypt: header for the AP inline assmblies ...
-rw-r--r--arch/s390/include/asm/cpu_mf.h10
-rw-r--r--arch/s390/include/asm/pci.h1
-rw-r--r--arch/s390/include/asm/pci_clp.h3
-rw-r--r--arch/s390/include/asm/string.h8
-rw-r--r--arch/s390/include/asm/trace/zcrypt.h122
-rw-r--r--arch/s390/include/uapi/asm/zcrypt.h37
-rw-r--r--arch/s390/kernel/early.c2
-rw-r--r--arch/s390/kernel/nmi.c19
-rw-r--r--arch/s390/kernel/setup.c4
-rw-r--r--arch/s390/lib/string.c12
-rw-r--r--arch/s390/mm/extmem.c2
-rw-r--r--arch/s390/pci/pci.c2
-rw-r--r--arch/s390/pci/pci_clp.c1
-rw-r--r--drivers/s390/block/dasd_3990_erp.c6
-rw-r--r--drivers/s390/crypto/Makefile13
-rw-r--r--drivers/s390/crypto/ap_asm.h191
-rw-r--r--drivers/s390/crypto/ap_bus.c1331
-rw-r--r--drivers/s390/crypto/ap_bus.h98
-rw-r--r--drivers/s390/crypto/ap_card.c170
-rw-r--r--drivers/s390/crypto/ap_debug.h28
-rw-r--r--drivers/s390/crypto/ap_queue.c701
-rw-r--r--drivers/s390/crypto/zcrypt_api.c1137
-rw-r--r--drivers/s390/crypto/zcrypt_api.h99
-rw-r--r--drivers/s390/crypto/zcrypt_card.c187
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c214
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c319
-rw-r--r--drivers/s390/crypto/zcrypt_debug.h50
-rw-r--r--drivers/s390/crypto/zcrypt_error.h105
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c135
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.h5
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c660
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.h23
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c376
-rw-r--r--drivers/s390/crypto/zcrypt_queue.c226
-rw-r--r--include/linux/mod_devicetable.h3
35 files changed, 3915 insertions, 2385 deletions
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index b69d8bc231a5..428c41239a49 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -213,18 +213,14 @@ static inline int stcctm5(u64 num, u64 *val)
213/* Query sampling information */ 213/* Query sampling information */
214static inline int qsi(struct hws_qsi_info_block *info) 214static inline int qsi(struct hws_qsi_info_block *info)
215{ 215{
216 int cc; 216 int cc = 1;
217 cc = 1;
218 217
219 asm volatile( 218 asm volatile(
220 "0: .insn s,0xb2860000,0(%1)\n" 219 "0: .insn s,0xb2860000,%1\n"
221 "1: lhi %0,0\n" 220 "1: lhi %0,0\n"
222 "2:\n" 221 "2:\n"
223 EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) 222 EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
224 : "=d" (cc), "+a" (info) 223 : "+d" (cc), "+Q" (*info));
225 : "m" (*info)
226 : "cc", "memory");
227
228 return cc ? -EINVAL : 0; 224 return cc ? -EINVAL : 0;
229} 225}
230 226
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 6611f798d2be..4e3186649578 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -133,6 +133,7 @@ struct zpci_dev {
133 /* Function measurement block */ 133 /* Function measurement block */
134 struct zpci_fmb *fmb; 134 struct zpci_fmb *fmb;
135 u16 fmb_update; /* update interval */ 135 u16 fmb_update; /* update interval */
136 u16 fmb_length;
136 /* software counters */ 137 /* software counters */
137 atomic64_t allocated_pages; 138 atomic64_t allocated_pages;
138 atomic64_t mapped_pages; 139 atomic64_t mapped_pages;
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
index c232ef9711f5..d6f1b1d94352 100644
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -87,7 +87,8 @@ struct clp_rsp_query_pci {
87 u16 pchid; 87 u16 pchid;
88 u32 bar[PCI_BAR_COUNT]; 88 u32 bar[PCI_BAR_COUNT];
89 u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */ 89 u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
90 u32 : 24; 90 u32 : 16;
91 u8 fmb_len;
91 u8 pft; /* pci function type */ 92 u8 pft; /* pci function type */
92 u64 sdma; /* start dma as */ 93 u64 sdma; /* start dma as */
93 u64 edma; /* end dma as */ 94 u64 edma; /* end dma as */
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 15a3c005c274..e5f5c7074f2c 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -62,7 +62,7 @@ static inline void *memchr(const void * s, int c, size_t n)
62 " jl 1f\n" 62 " jl 1f\n"
63 " la %0,0\n" 63 " la %0,0\n"
64 "1:" 64 "1:"
65 : "+a" (ret), "+&a" (s) : "d" (r0) : "cc"); 65 : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
66 return (void *) ret; 66 return (void *) ret;
67} 67}
68 68
@@ -74,7 +74,7 @@ static inline void *memscan(void *s, int c, size_t n)
74 asm volatile( 74 asm volatile(
75 "0: srst %0,%1\n" 75 "0: srst %0,%1\n"
76 " jo 0b\n" 76 " jo 0b\n"
77 : "+a" (ret), "+&a" (s) : "d" (r0) : "cc"); 77 : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
78 return (void *) ret; 78 return (void *) ret;
79} 79}
80 80
@@ -115,7 +115,7 @@ static inline size_t strlen(const char *s)
115 asm volatile( 115 asm volatile(
116 "0: srst %0,%1\n" 116 "0: srst %0,%1\n"
117 " jo 0b" 117 " jo 0b"
118 : "+d" (r0), "+a" (tmp) : : "cc"); 118 : "+d" (r0), "+a" (tmp) : : "cc", "memory");
119 return r0 - (unsigned long) s; 119 return r0 - (unsigned long) s;
120} 120}
121 121
@@ -128,7 +128,7 @@ static inline size_t strnlen(const char * s, size_t n)
128 asm volatile( 128 asm volatile(
129 "0: srst %0,%1\n" 129 "0: srst %0,%1\n"
130 " jo 0b" 130 " jo 0b"
131 : "+a" (end), "+a" (tmp) : "d" (r0) : "cc"); 131 : "+a" (end), "+a" (tmp) : "d" (r0) : "cc", "memory");
132 return end - s; 132 return end - s;
133} 133}
134#else /* IN_ARCH_STRING_C */ 134#else /* IN_ARCH_STRING_C */
diff --git a/arch/s390/include/asm/trace/zcrypt.h b/arch/s390/include/asm/trace/zcrypt.h
new file mode 100644
index 000000000000..adcb77fafa9d
--- /dev/null
+++ b/arch/s390/include/asm/trace/zcrypt.h
@@ -0,0 +1,122 @@
1/*
2 * Tracepoint definitions for the s390 zcrypt device driver
3 *
4 * Copyright IBM Corp. 2016
5 * Author(s): Harald Freudenberger <freude@de.ibm.com>
6 *
7 * Currently there are two tracepoint events defined here.
8 * An s390_zcrypt_req request event occurs as soon as the request is
9 * recognized by the zcrypt ioctl function. This event may act as some kind
10 * of request-processing-starts-now indication.
11 * As late as possible within the zcrypt ioctl function there occurs the
12 * s390_zcrypt_rep event which may act as the point in time where the
13 * request has been processed by the kernel and the result is about to be
14 * transferred back to userspace.
15 * The glue which binds together request and reply event is the ptr
16 * parameter, which is the local buffer address where the request from
17 * userspace has been stored by the ioctl function.
18 *
19 * The main purpose of this zcrypt tracepoint api is to get some data for
20 * performance measurements together with information about on which card
21 * and queue the request has been processed. It is not an ffdc interface as
22 * there is already code in the zcrypt device driver to serve the s390
23 * debug feature interface.
24 */
25
26#undef TRACE_SYSTEM
27#define TRACE_SYSTEM s390
28
29#if !defined(_TRACE_S390_ZCRYPT_H) || defined(TRACE_HEADER_MULTI_READ)
30#define _TRACE_S390_ZCRYPT_H
31
32#include <linux/tracepoint.h>
33
34#define TP_ICARSAMODEXPO 0x0001
35#define TP_ICARSACRT 0x0002
36#define TB_ZSECSENDCPRB 0x0003
37#define TP_ZSENDEP11CPRB 0x0004
38#define TP_HWRNGCPRB 0x0005
39
40#define show_zcrypt_tp_type(type) \
41 __print_symbolic(type, \
42 { TP_ICARSAMODEXPO, "ICARSAMODEXPO" }, \
43 { TP_ICARSACRT, "ICARSACRT" }, \
44 { TB_ZSECSENDCPRB, "ZSECSENDCPRB" }, \
45 { TP_ZSENDEP11CPRB, "ZSENDEP11CPRB" }, \
46 { TP_HWRNGCPRB, "HWRNGCPRB" })
47
48/**
49 * trace_s390_zcrypt_req - zcrypt request tracepoint function
50 * @ptr: Address of the local buffer where the request from userspace
51 * is stored. Can be used as a unique id to relate together
52 * request and reply.
53 * @type: One of the TP_ defines above.
54 *
55 * Called when a request from userspace is recognised within the ioctl
56 * function of the zcrypt device driver and may act as an entry
57 * timestamp.
58 */
59TRACE_EVENT(s390_zcrypt_req,
60 TP_PROTO(void *ptr, u32 type),
61 TP_ARGS(ptr, type),
62 TP_STRUCT__entry(
63 __field(void *, ptr)
64 __field(u32, type)),
65 TP_fast_assign(
66 __entry->ptr = ptr;
67 __entry->type = type;),
68 TP_printk("ptr=%p type=%s",
69 __entry->ptr,
70 show_zcrypt_tp_type(__entry->type))
71);
72
73/**
74 * trace_s390_zcrypt_rep - zcrypt reply tracepoint function
75 * @ptr: Address of the local buffer where the request from userspace
76 * is stored. Can be used as a unique id to match together
77 * request and reply.
78 * @fc: Function code.
79 * @rc: The bare returncode as returned by the device driver ioctl
80 * function.
81 * @dev: The adapter nr where this request was actually processed.
82 * @dom: Domain id of the device where this request was processed.
83 *
84 * Called upon recognising the reply from the crypto adapter. This
85 * message may act as the exit timestamp for the request but also
86 * carries some info about on which adapter the request was processed
87 * and the returncode from the device driver.
88 */
89TRACE_EVENT(s390_zcrypt_rep,
90 TP_PROTO(void *ptr, u32 fc, u32 rc, u16 dev, u16 dom),
91 TP_ARGS(ptr, fc, rc, dev, dom),
92 TP_STRUCT__entry(
93 __field(void *, ptr)
94 __field(u32, fc)
95 __field(u32, rc)
96 __field(u16, device)
97 __field(u16, domain)),
98 TP_fast_assign(
99 __entry->ptr = ptr;
100 __entry->fc = fc;
101 __entry->rc = rc;
102 __entry->device = dev;
103 __entry->domain = dom;),
104 TP_printk("ptr=%p fc=0x%04x rc=%d dev=0x%02hx domain=0x%04hx",
105 __entry->ptr,
106 (unsigned int) __entry->fc,
107 (int) __entry->rc,
108 (unsigned short) __entry->device,
109 (unsigned short) __entry->domain)
110);
111
112#endif /* _TRACE_S390_ZCRYPT_H */
113
114/* This part must be outside protection */
115
116#undef TRACE_INCLUDE_PATH
117#undef TRACE_INCLUDE_FILE
118
119#define TRACE_INCLUDE_PATH asm/trace
120#define TRACE_INCLUDE_FILE zcrypt
121
122#include <trace/define_trace.h>
diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h
index f2b18eacaca8..a777f87ef889 100644
--- a/arch/s390/include/uapi/asm/zcrypt.h
+++ b/arch/s390/include/uapi/asm/zcrypt.h
@@ -215,6 +215,42 @@ struct ep11_urb {
215 uint64_t resp; 215 uint64_t resp;
216} __attribute__((packed)); 216} __attribute__((packed));
217 217
218/**
219 * struct zcrypt_device_status
220 * @hwtype: raw hardware type
221 * @qid: 6 bit device index, 8 bit domain
222 * @functions: AP device function bit field 'abcdef'
223 * a, b, c = reserved
224 * d = CCA coprocessor
225 * e = Accelerator
226 * f = EP11 coprocessor
227 * @online online status
228 * @reserved reserved
229 */
230struct zcrypt_device_status {
231 unsigned int hwtype:8;
232 unsigned int qid:14;
233 unsigned int online:1;
234 unsigned int functions:6;
235 unsigned int reserved:3;
236};
237
238#define MAX_ZDEV_CARDIDS 64
239#define MAX_ZDEV_DOMAINS 256
240
241/**
242 * Maximum number of zcrypt devices
243 */
244#define MAX_ZDEV_ENTRIES (MAX_ZDEV_CARDIDS * MAX_ZDEV_DOMAINS)
245
246/**
247 * zcrypt_device_matrix
248 * Device matrix of all zcrypt devices
249 */
250struct zcrypt_device_matrix {
251 struct zcrypt_device_status device[MAX_ZDEV_ENTRIES];
252};
253
218#define AUTOSELECT ((unsigned int)0xFFFFFFFF) 254#define AUTOSELECT ((unsigned int)0xFFFFFFFF)
219 255
220#define ZCRYPT_IOCTL_MAGIC 'z' 256#define ZCRYPT_IOCTL_MAGIC 'z'
@@ -321,6 +357,7 @@ struct ep11_urb {
321#define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0) 357#define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0)
322#define ZSECSENDCPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0) 358#define ZSECSENDCPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0)
323#define ZSENDEP11CPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0) 359#define ZSENDEP11CPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0)
360#define ZDEVICESTATUS _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x4f, 0)
324 361
325/* New status calls */ 362/* New status calls */
326#define Z90STAT_TOTALCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x40, int) 363#define Z90STAT_TOTALCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x40, int)
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index d038c8cea6cb..324f1c147a41 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -417,7 +417,7 @@ static __init void memmove_early(void *dst, const void *src, size_t n)
417 " brctg %[n],0b\n" 417 " brctg %[n],0b\n"
418 "1:\n" 418 "1:\n"
419 : [addr] "=&d" (addr), 419 : [addr] "=&d" (addr),
420 [psw_pgm_addr] "=&Q" (S390_lowcore.program_new_psw.addr), 420 [psw_pgm_addr] "=Q" (S390_lowcore.program_new_psw.addr),
421 [dst] "+&a" (dst), [src] "+&a" (src), [n] "+d" (n) 421 [dst] "+&a" (dst), [src] "+&a" (src), [n] "+d" (n)
422 : [incr] "d" (incr) 422 : [incr] "d" (incr)
423 : "cc", "memory"); 423 : "cc", "memory");
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 9a32f7419d78..9862196b4b89 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -102,7 +102,7 @@ static int notrace s390_validate_registers(union mci mci, int umode)
102{ 102{
103 int kill_task; 103 int kill_task;
104 u64 zero; 104 u64 zero;
105 void *fpt_save_area, *fpt_creg_save_area; 105 void *fpt_save_area;
106 106
107 kill_task = 0; 107 kill_task = 0;
108 zero = 0; 108 zero = 0;
@@ -130,7 +130,6 @@ static int notrace s390_validate_registers(union mci mci, int umode)
130 kill_task = 1; 130 kill_task = 1;
131 } 131 }
132 fpt_save_area = &S390_lowcore.floating_pt_save_area; 132 fpt_save_area = &S390_lowcore.floating_pt_save_area;
133 fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
134 if (!mci.fc) { 133 if (!mci.fc) {
135 /* 134 /*
136 * Floating point control register can't be restored. 135 * Floating point control register can't be restored.
@@ -142,11 +141,13 @@ static int notrace s390_validate_registers(union mci mci, int umode)
142 */ 141 */
143 if (S390_lowcore.fpu_flags & KERNEL_FPC) 142 if (S390_lowcore.fpu_flags & KERNEL_FPC)
144 s390_handle_damage(); 143 s390_handle_damage();
145 asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero)); 144 asm volatile("lfpc %0" : : "Q" (zero));
146 if (!test_cpu_flag(CIF_FPU)) 145 if (!test_cpu_flag(CIF_FPU))
147 kill_task = 1; 146 kill_task = 1;
148 } else 147 } else {
149 asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area)); 148 asm volatile("lfpc %0"
149 : : "Q" (S390_lowcore.fpt_creg_save_area));
150 }
150 151
151 if (!MACHINE_HAS_VX) { 152 if (!MACHINE_HAS_VX) {
152 /* Validate floating point registers */ 153 /* Validate floating point registers */
@@ -167,7 +168,7 @@ static int notrace s390_validate_registers(union mci mci, int umode)
167 " ld 13,104(%0)\n" 168 " ld 13,104(%0)\n"
168 " ld 14,112(%0)\n" 169 " ld 14,112(%0)\n"
169 " ld 15,120(%0)\n" 170 " ld 15,120(%0)\n"
170 : : "a" (fpt_save_area)); 171 : : "a" (fpt_save_area) : "memory");
171 } else { 172 } else {
172 /* Validate vector registers */ 173 /* Validate vector registers */
173 union ctlreg0 cr0; 174 union ctlreg0 cr0;
@@ -217,7 +218,7 @@ static int notrace s390_validate_registers(union mci mci, int umode)
217 } else { 218 } else {
218 asm volatile( 219 asm volatile(
219 " lctlg 0,15,0(%0)" 220 " lctlg 0,15,0(%0)"
220 : : "a" (&S390_lowcore.cregs_save_area)); 221 : : "a" (&S390_lowcore.cregs_save_area) : "memory");
221 } 222 }
222 /* 223 /*
223 * We don't even try to validate the TOD register, since we simply 224 * We don't even try to validate the TOD register, since we simply
@@ -234,9 +235,9 @@ static int notrace s390_validate_registers(union mci mci, int umode)
234 : : : "0", "cc"); 235 : : : "0", "cc");
235 else 236 else
236 asm volatile( 237 asm volatile(
237 " l 0,0(%0)\n" 238 " l 0,%0\n"
238 " sckpf" 239 " sckpf"
239 : : "a" (&S390_lowcore.tod_progreg_save_area) 240 : : "Q" (S390_lowcore.tod_progreg_save_area)
240 : "0", "cc"); 241 : "0", "cc");
241 /* Validate clock comparator register */ 242 /* Validate clock comparator register */
242 set_clock_comparator(S390_lowcore.clock_comparator); 243 set_clock_comparator(S390_lowcore.clock_comparator);
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index adfac9f0a89f..865a48871ca4 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -485,7 +485,7 @@ static void __init setup_memory_end(void)
485 max_pfn = max_low_pfn = PFN_DOWN(memory_end); 485 max_pfn = max_low_pfn = PFN_DOWN(memory_end);
486 memblock_remove(memory_end, ULONG_MAX); 486 memblock_remove(memory_end, ULONG_MAX);
487 487
488 pr_notice("Max memory size: %luMB\n", memory_end >> 20); 488 pr_notice("The maximum memory size is %luMB\n", memory_end >> 20);
489} 489}
490 490
491static void __init setup_vmcoreinfo(void) 491static void __init setup_vmcoreinfo(void)
@@ -650,7 +650,7 @@ static void __init check_initrd(void)
650#ifdef CONFIG_BLK_DEV_INITRD 650#ifdef CONFIG_BLK_DEV_INITRD
651 if (INITRD_START && INITRD_SIZE && 651 if (INITRD_START && INITRD_SIZE &&
652 !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) { 652 !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
653 pr_err("initrd does not fit memory.\n"); 653 pr_err("The initial RAM disk does not fit into the memory\n");
654 memblock_free(INITRD_START, INITRD_SIZE); 654 memblock_free(INITRD_START, INITRD_SIZE);
655 initrd_start = initrd_end = 0; 655 initrd_start = initrd_end = 0;
656 } 656 }
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index 48352bffbc92..f71d9f655970 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -20,7 +20,7 @@ static inline char *__strend(const char *s)
20 20
21 asm volatile ("0: srst %0,%1\n" 21 asm volatile ("0: srst %0,%1\n"
22 " jo 0b" 22 " jo 0b"
23 : "+d" (r0), "+a" (s) : : "cc" ); 23 : "+d" (r0), "+a" (s) : : "cc", "memory");
24 return (char *) r0; 24 return (char *) r0;
25} 25}
26 26
@@ -31,7 +31,7 @@ static inline char *__strnend(const char *s, size_t n)
31 31
32 asm volatile ("0: srst %0,%1\n" 32 asm volatile ("0: srst %0,%1\n"
33 " jo 0b" 33 " jo 0b"
34 : "+d" (p), "+a" (s) : "d" (r0) : "cc" ); 34 : "+d" (p), "+a" (s) : "d" (r0) : "cc", "memory");
35 return (char *) p; 35 return (char *) p;
36} 36}
37 37
@@ -213,7 +213,7 @@ int strcmp(const char *cs, const char *ct)
213 " sr %0,%1\n" 213 " sr %0,%1\n"
214 "1:" 214 "1:"
215 : "+d" (ret), "+d" (r0), "+a" (cs), "+a" (ct) 215 : "+d" (ret), "+d" (r0), "+a" (cs), "+a" (ct)
216 : : "cc" ); 216 : : "cc", "memory");
217 return ret; 217 return ret;
218} 218}
219EXPORT_SYMBOL(strcmp); 219EXPORT_SYMBOL(strcmp);
@@ -250,7 +250,7 @@ static inline int clcle(const char *s1, unsigned long l1,
250 " ipm %0\n" 250 " ipm %0\n"
251 " srl %0,28" 251 " srl %0,28"
252 : "=&d" (cc), "+a" (r2), "+a" (r3), 252 : "=&d" (cc), "+a" (r2), "+a" (r3),
253 "+a" (r4), "+a" (r5) : : "cc"); 253 "+a" (r4), "+a" (r5) : : "cc", "memory");
254 return cc; 254 return cc;
255} 255}
256 256
@@ -298,7 +298,7 @@ void *memchr(const void *s, int c, size_t n)
298 " jl 1f\n" 298 " jl 1f\n"
299 " la %0,0\n" 299 " la %0,0\n"
300 "1:" 300 "1:"
301 : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" ); 301 : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
302 return (void *) ret; 302 return (void *) ret;
303} 303}
304EXPORT_SYMBOL(memchr); 304EXPORT_SYMBOL(memchr);
@@ -336,7 +336,7 @@ void *memscan(void *s, int c, size_t n)
336 336
337 asm volatile ("0: srst %0,%1\n" 337 asm volatile ("0: srst %0,%1\n"
338 " jo 0b\n" 338 " jo 0b\n"
339 : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" ); 339 : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
340 return (void *) ret; 340 return (void *) ret;
341} 341}
342EXPORT_SYMBOL(memscan); 342EXPORT_SYMBOL(memscan);
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 02042b6b66bf..362237203144 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -122,7 +122,7 @@ dcss_set_subcodes(void)
122 "1: la %2,3\n" 122 "1: la %2,3\n"
123 "2:\n" 123 "2:\n"
124 EX_TABLE(0b, 1b) 124 EX_TABLE(0b, 1b)
125 : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); 125 : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc", "memory");
126 126
127 kfree(name); 127 kfree(name);
128 /* Diag x'64' new subcodes are supported, set to new subcodes */ 128 /* Diag x'64' new subcodes are supported, set to new subcodes */
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 64e1734bebb7..38e17d4d9884 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -180,7 +180,7 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev)
180{ 180{
181 struct mod_pci_args args = { 0, 0, 0, 0 }; 181 struct mod_pci_args args = { 0, 0, 0, 0 };
182 182
183 if (zdev->fmb) 183 if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
184 return -EINVAL; 184 return -EINVAL;
185 185
186 zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL); 186 zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index e3ef63b36b5a..1c3332ac1957 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -148,6 +148,7 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
148 zdev->pft = response->pft; 148 zdev->pft = response->pft;
149 zdev->vfn = response->vfn; 149 zdev->vfn = response->vfn;
150 zdev->uid = response->uid; 150 zdev->uid = response->uid;
151 zdev->fmb_length = sizeof(u32) * response->fmb_len;
151 152
152 memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip)); 153 memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
153 if (response->util_str_avail) { 154 if (response->util_str_avail) {
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 95f7645e3c37..774da20ceb58 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -674,7 +674,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
674 break; 674 break;
675 case 0x0D: 675 case 0x0D:
676 dev_warn(&device->cdev->dev, 676 dev_warn(&device->cdev->dev,
677 "FORMAT 4 - No syn byte in count " 677 "FORMAT 4 - No sync byte in count "
678 "address area; offset active\n"); 678 "address area; offset active\n");
679 break; 679 break;
680 case 0x0E: 680 case 0x0E:
@@ -684,7 +684,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
684 break; 684 break;
685 case 0x0F: 685 case 0x0F:
686 dev_warn(&device->cdev->dev, 686 dev_warn(&device->cdev->dev,
687 "FORMAT 4 - No syn byte in data area; " 687 "FORMAT 4 - No sync byte in data area; "
688 "offset active\n"); 688 "offset active\n");
689 break; 689 break;
690 default: 690 default:
@@ -999,7 +999,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
999 break; 999 break;
1000 default: 1000 default:
1001 dev_warn(&device->cdev->dev, 1001 dev_warn(&device->cdev->dev,
1002 "FORMAT D - Reserved\n"); 1002 "FORMAT F - Reserved\n");
1003 } 1003 }
1004 break; 1004 break;
1005 1005
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index b8ab18676e69..0a7fb83f35e5 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -2,10 +2,11 @@
2# S/390 crypto devices 2# S/390 crypto devices
3# 3#
4 4
5ap-objs := ap_bus.o 5ap-objs := ap_bus.o ap_card.o ap_queue.o
6# zcrypt_api depends on ap 6obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o
7obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o 7# zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o
8# msgtype* depend on zcrypt_api 8zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o
9obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o 9zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
10# adapter drivers depend on ap, zcrypt_api and msgtype* 10obj-$(CONFIG_ZCRYPT) += zcrypt.o
11# adapter drivers depend on ap.o and zcrypt.o
11obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o 12obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
diff --git a/drivers/s390/crypto/ap_asm.h b/drivers/s390/crypto/ap_asm.h
new file mode 100644
index 000000000000..7a630047c372
--- /dev/null
+++ b/drivers/s390/crypto/ap_asm.h
@@ -0,0 +1,191 @@
1/*
2 * Copyright IBM Corp. 2016
3 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
4 *
5 * Adjunct processor bus inline assemblies.
6 */
7
8#ifndef _AP_ASM_H_
9#define _AP_ASM_H_
10
11#include <asm/isc.h>
12
13/**
14 * ap_intructions_available() - Test if AP instructions are available.
15 *
16 * Returns 0 if the AP instructions are installed.
17 */
18static inline int ap_instructions_available(void)
19{
20 register unsigned long reg0 asm ("0") = AP_MKQID(0, 0);
21 register unsigned long reg1 asm ("1") = -ENODEV;
22 register unsigned long reg2 asm ("2") = 0UL;
23
24 asm volatile(
25 " .long 0xb2af0000\n" /* PQAP(TAPQ) */
26 "0: la %1,0\n"
27 "1:\n"
28 EX_TABLE(0b, 1b)
29 : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc");
30 return reg1;
31}
32
33/**
34 * ap_tapq(): Test adjunct processor queue.
35 * @qid: The AP queue number
36 * @info: Pointer to queue descriptor
37 *
38 * Returns AP queue status structure.
39 */
40static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
41{
42 register unsigned long reg0 asm ("0") = qid;
43 register struct ap_queue_status reg1 asm ("1");
44 register unsigned long reg2 asm ("2") = 0UL;
45
46 asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
47 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
48 if (info)
49 *info = reg2;
50 return reg1;
51}
52
53/**
54 * ap_pqap_rapq(): Reset adjunct processor queue.
55 * @qid: The AP queue number
56 *
57 * Returns AP queue status structure.
58 */
59static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
60{
61 register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
62 register struct ap_queue_status reg1 asm ("1");
63 register unsigned long reg2 asm ("2") = 0UL;
64
65 asm volatile(
66 ".long 0xb2af0000" /* PQAP(RAPQ) */
67 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
68 return reg1;
69}
70
71/**
72 * ap_aqic(): Enable interruption for a specific AP.
73 * @qid: The AP queue number
74 * @ind: The notification indicator byte
75 *
76 * Returns AP queue status.
77 */
78static inline struct ap_queue_status ap_aqic(ap_qid_t qid, void *ind)
79{
80 register unsigned long reg0 asm ("0") = qid | (3UL << 24);
81 register unsigned long reg1_in asm ("1") = (8UL << 44) | AP_ISC;
82 register struct ap_queue_status reg1_out asm ("1");
83 register void *reg2 asm ("2") = ind;
84
85 asm volatile(
86 ".long 0xb2af0000" /* PQAP(AQIC) */
87 : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
88 :
89 : "cc");
90 return reg1_out;
91}
92
93/**
94 * ap_qci(): Get AP configuration data
95 *
96 * Returns 0 on success, or -EOPNOTSUPP.
97 */
98static inline int ap_qci(void *config)
99{
100 register unsigned long reg0 asm ("0") = 0x04000000UL;
101 register unsigned long reg1 asm ("1") = -EINVAL;
102 register void *reg2 asm ("2") = (void *) config;
103
104 asm volatile(
105 ".long 0xb2af0000\n" /* PQAP(QCI) */
106 "0: la %1,0\n"
107 "1:\n"
108 EX_TABLE(0b, 1b)
109 : "+d" (reg0), "+d" (reg1), "+d" (reg2)
110 :
111 : "cc", "memory");
112
113 return reg1;
114}
115
116/**
117 * ap_nqap(): Send message to adjunct processor queue.
118 * @qid: The AP queue number
119 * @psmid: The program supplied message identifier
120 * @msg: The message text
121 * @length: The message length
122 *
123 * Returns AP queue status structure.
124 * Condition code 1 on NQAP can't happen because the L bit is 1.
125 * Condition code 2 on NQAP also means the send is incomplete,
126 * because a segment boundary was reached. The NQAP is repeated.
127 */
128static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
129 unsigned long long psmid,
130 void *msg, size_t length)
131{
132 struct msgblock { char _[length]; };
133 register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
134 register struct ap_queue_status reg1 asm ("1");
135 register unsigned long reg2 asm ("2") = (unsigned long) msg;
136 register unsigned long reg3 asm ("3") = (unsigned long) length;
137 register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
138 register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
139
140 asm volatile (
141 "0: .long 0xb2ad0042\n" /* NQAP */
142 " brc 2,0b"
143 : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
144 : "d" (reg4), "d" (reg5), "m" (*(struct msgblock *) msg)
145 : "cc");
146 return reg1;
147}
148
149/**
150 * ap_dqap(): Receive message from adjunct processor queue.
151 * @qid: The AP queue number
152 * @psmid: Pointer to program supplied message identifier
153 * @msg: The message text
154 * @length: The message length
155 *
156 * Returns AP queue status structure.
157 * Condition code 1 on DQAP means the receive has taken place
158 * but only partially. The response is incomplete, hence the
159 * DQAP is repeated.
160 * Condition code 2 on DQAP also means the receive is incomplete,
161 * this time because a segment boundary was reached. Again, the
162 * DQAP is repeated.
163 * Note that gpr2 is used by the DQAP instruction to keep track of
164 * any 'residual' length, in case the instruction gets interrupted.
165 * Hence it gets zeroed before the instruction.
166 */
167static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
168 unsigned long long *psmid,
169 void *msg, size_t length)
170{
171 struct msgblock { char _[length]; };
172 register unsigned long reg0 asm("0") = qid | 0x80000000UL;
173 register struct ap_queue_status reg1 asm ("1");
174 register unsigned long reg2 asm("2") = 0UL;
175 register unsigned long reg4 asm("4") = (unsigned long) msg;
176 register unsigned long reg5 asm("5") = (unsigned long) length;
177 register unsigned long reg6 asm("6") = 0UL;
178 register unsigned long reg7 asm("7") = 0UL;
179
180
181 asm volatile(
182 "0: .long 0xb2ae0064\n" /* DQAP */
183 " brc 6,0b\n"
184 : "+d" (reg0), "=d" (reg1), "+d" (reg2),
185 "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
186 "=m" (*(struct msgblock *) msg) : : "cc");
187 *psmid = (((unsigned long long) reg6) << 32) + reg7;
188 return reg1;
189}
190
191#endif /* _AP_ASM_H_ */
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index f407b4f9d0ba..6d75984a3d85 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -46,8 +46,12 @@
46#include <linux/ktime.h> 46#include <linux/ktime.h>
47#include <asm/facility.h> 47#include <asm/facility.h>
48#include <linux/crypto.h> 48#include <linux/crypto.h>
49#include <linux/mod_devicetable.h>
50#include <linux/debugfs.h>
49 51
50#include "ap_bus.h" 52#include "ap_bus.h"
53#include "ap_asm.h"
54#include "ap_debug.h"
51 55
52/* 56/*
53 * Module description. 57 * Module description.
@@ -62,6 +66,7 @@ MODULE_ALIAS_CRYPTO("z90crypt");
62 * Module parameter 66 * Module parameter
63 */ 67 */
64int ap_domain_index = -1; /* Adjunct Processor Domain Index */ 68int ap_domain_index = -1; /* Adjunct Processor Domain Index */
69static DEFINE_SPINLOCK(ap_domain_lock);
65module_param_named(domain, ap_domain_index, int, S_IRUSR|S_IRGRP); 70module_param_named(domain, ap_domain_index, int, S_IRUSR|S_IRGRP);
66MODULE_PARM_DESC(domain, "domain index for ap devices"); 71MODULE_PARM_DESC(domain, "domain index for ap devices");
67EXPORT_SYMBOL(ap_domain_index); 72EXPORT_SYMBOL(ap_domain_index);
@@ -70,13 +75,21 @@ static int ap_thread_flag = 0;
70module_param_named(poll_thread, ap_thread_flag, int, S_IRUSR|S_IRGRP); 75module_param_named(poll_thread, ap_thread_flag, int, S_IRUSR|S_IRGRP);
71MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off)."); 76MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
72 77
73static struct device *ap_root_device = NULL; 78static struct device *ap_root_device;
79
80DEFINE_SPINLOCK(ap_list_lock);
81LIST_HEAD(ap_card_list);
82
74static struct ap_config_info *ap_configuration; 83static struct ap_config_info *ap_configuration;
75static DEFINE_SPINLOCK(ap_device_list_lock);
76static LIST_HEAD(ap_device_list);
77static bool initialised; 84static bool initialised;
78 85
79/* 86/*
87 * AP bus related debug feature things.
88 */
89static struct dentry *ap_dbf_root;
90debug_info_t *ap_dbf_info;
91
92/*
80 * Workqueue timer for bus rescan. 93 * Workqueue timer for bus rescan.
81 */ 94 */
82static struct timer_list ap_config_timer; 95static struct timer_list ap_config_timer;
@@ -89,7 +102,6 @@ static DECLARE_WORK(ap_scan_work, ap_scan_bus);
89 */ 102 */
90static void ap_tasklet_fn(unsigned long); 103static void ap_tasklet_fn(unsigned long);
91static DECLARE_TASKLET(ap_tasklet, ap_tasklet_fn, 0); 104static DECLARE_TASKLET(ap_tasklet, ap_tasklet_fn, 0);
92static atomic_t ap_poll_requests = ATOMIC_INIT(0);
93static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); 105static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
94static struct task_struct *ap_poll_kthread = NULL; 106static struct task_struct *ap_poll_kthread = NULL;
95static DEFINE_MUTEX(ap_poll_thread_mutex); 107static DEFINE_MUTEX(ap_poll_thread_mutex);
@@ -129,23 +141,17 @@ static inline int ap_using_interrupts(void)
129} 141}
130 142
131/** 143/**
132 * ap_intructions_available() - Test if AP instructions are available. 144 * ap_airq_ptr() - Get the address of the adapter interrupt indicator
133 * 145 *
134 * Returns 0 if the AP instructions are installed. 146 * Returns the address of the local-summary-indicator of the adapter
147 * interrupt handler for AP, or NULL if adapter interrupts are not
148 * available.
135 */ 149 */
136static inline int ap_instructions_available(void) 150void *ap_airq_ptr(void)
137{ 151{
138 register unsigned long reg0 asm ("0") = AP_MKQID(0,0); 152 if (ap_using_interrupts())
139 register unsigned long reg1 asm ("1") = -ENODEV; 153 return ap_airq.lsi_ptr;
140 register unsigned long reg2 asm ("2") = 0UL; 154 return NULL;
141
142 asm volatile(
143 " .long 0xb2af0000\n" /* PQAP(TAPQ) */
144 "0: la %1,0\n"
145 "1:\n"
146 EX_TABLE(0b, 1b)
147 : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
148 return reg1;
149} 155}
150 156
151/** 157/**
@@ -169,19 +175,6 @@ static int ap_configuration_available(void)
169 return test_facility(12); 175 return test_facility(12);
170} 176}
171 177
172static inline struct ap_queue_status
173__pqap_tapq(ap_qid_t qid, unsigned long *info)
174{
175 register unsigned long reg0 asm ("0") = qid;
176 register struct ap_queue_status reg1 asm ("1");
177 register unsigned long reg2 asm ("2") = 0UL;
178
179 asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
180 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
181 *info = reg2;
182 return reg1;
183}
184
185/** 178/**
186 * ap_test_queue(): Test adjunct processor queue. 179 * ap_test_queue(): Test adjunct processor queue.
187 * @qid: The AP queue number 180 * @qid: The AP queue number
@@ -192,85 +185,16 @@ __pqap_tapq(ap_qid_t qid, unsigned long *info)
192static inline struct ap_queue_status 185static inline struct ap_queue_status
193ap_test_queue(ap_qid_t qid, unsigned long *info) 186ap_test_queue(ap_qid_t qid, unsigned long *info)
194{ 187{
195 struct ap_queue_status aqs;
196 unsigned long _info;
197
198 if (test_facility(15)) 188 if (test_facility(15))
199 qid |= 1UL << 23; /* set APFT T bit*/ 189 qid |= 1UL << 23; /* set APFT T bit*/
200 aqs = __pqap_tapq(qid, &_info); 190 return ap_tapq(qid, info);
201 if (info)
202 *info = _info;
203 return aqs;
204}
205
206/**
207 * ap_reset_queue(): Reset adjunct processor queue.
208 * @qid: The AP queue number
209 *
210 * Returns AP queue status structure.
211 */
212static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
213{
214 register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
215 register struct ap_queue_status reg1 asm ("1");
216 register unsigned long reg2 asm ("2") = 0UL;
217
218 asm volatile(
219 ".long 0xb2af0000" /* PQAP(RAPQ) */
220 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
221 return reg1;
222}
223
224/**
225 * ap_queue_interruption_control(): Enable interruption for a specific AP.
226 * @qid: The AP queue number
227 * @ind: The notification indicator byte
228 *
229 * Returns AP queue status.
230 */
231static inline struct ap_queue_status
232ap_queue_interruption_control(ap_qid_t qid, void *ind)
233{
234 register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
235 register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
236 register struct ap_queue_status reg1_out asm ("1");
237 register void *reg2 asm ("2") = ind;
238 asm volatile(
239 ".long 0xb2af0000" /* PQAP(AQIC) */
240 : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
241 :
242 : "cc" );
243 return reg1_out;
244}
245
246/**
247 * ap_query_configuration(): Get AP configuration data
248 *
249 * Returns 0 on success, or -EOPNOTSUPP.
250 */
251static inline int __ap_query_configuration(void)
252{
253 register unsigned long reg0 asm ("0") = 0x04000000UL;
254 register unsigned long reg1 asm ("1") = -EINVAL;
255 register void *reg2 asm ("2") = (void *) ap_configuration;
256
257 asm volatile(
258 ".long 0xb2af0000\n" /* PQAP(QCI) */
259 "0: la %1,0\n"
260 "1:\n"
261 EX_TABLE(0b, 1b)
262 : "+d" (reg0), "+d" (reg1), "+d" (reg2)
263 :
264 : "cc");
265
266 return reg1;
267} 191}
268 192
269static inline int ap_query_configuration(void) 193static inline int ap_query_configuration(void)
270{ 194{
271 if (!ap_configuration) 195 if (!ap_configuration)
272 return -EOPNOTSUPP; 196 return -EOPNOTSUPP;
273 return __ap_query_configuration(); 197 return ap_qci(ap_configuration);
274} 198}
275 199
276/** 200/**
@@ -331,162 +255,6 @@ static inline int ap_test_config_domain(unsigned int domain)
331} 255}
332 256
333/** 257/**
334 * ap_queue_enable_interruption(): Enable interruption on an AP.
335 * @qid: The AP queue number
336 * @ind: the notification indicator byte
337 *
338 * Enables interruption on AP queue via ap_queue_interruption_control(). Based
339 * on the return value it waits a while and tests the AP queue if interrupts
340 * have been switched on using ap_test_queue().
341 */
342static int ap_queue_enable_interruption(struct ap_device *ap_dev, void *ind)
343{
344 struct ap_queue_status status;
345
346 status = ap_queue_interruption_control(ap_dev->qid, ind);
347 switch (status.response_code) {
348 case AP_RESPONSE_NORMAL:
349 case AP_RESPONSE_OTHERWISE_CHANGED:
350 return 0;
351 case AP_RESPONSE_Q_NOT_AVAIL:
352 case AP_RESPONSE_DECONFIGURED:
353 case AP_RESPONSE_CHECKSTOPPED:
354 case AP_RESPONSE_INVALID_ADDRESS:
355 pr_err("Registering adapter interrupts for AP %d failed\n",
356 AP_QID_DEVICE(ap_dev->qid));
357 return -EOPNOTSUPP;
358 case AP_RESPONSE_RESET_IN_PROGRESS:
359 case AP_RESPONSE_BUSY:
360 default:
361 return -EBUSY;
362 }
363}
364
365static inline struct ap_queue_status
366__nqap(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
367{
368 typedef struct { char _[length]; } msgblock;
369 register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
370 register struct ap_queue_status reg1 asm ("1");
371 register unsigned long reg2 asm ("2") = (unsigned long) msg;
372 register unsigned long reg3 asm ("3") = (unsigned long) length;
373 register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
374 register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
375
376 asm volatile (
377 "0: .long 0xb2ad0042\n" /* NQAP */
378 " brc 2,0b"
379 : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
380 : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
381 : "cc");
382 return reg1;
383}
384
385/**
386 * __ap_send(): Send message to adjunct processor queue.
387 * @qid: The AP queue number
388 * @psmid: The program supplied message identifier
389 * @msg: The message text
390 * @length: The message length
391 * @special: Special Bit
392 *
393 * Returns AP queue status structure.
394 * Condition code 1 on NQAP can't happen because the L bit is 1.
395 * Condition code 2 on NQAP also means the send is incomplete,
396 * because a segment boundary was reached. The NQAP is repeated.
397 */
398static inline struct ap_queue_status
399__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
400 unsigned int special)
401{
402 if (special == 1)
403 qid |= 0x400000UL;
404 return __nqap(qid, psmid, msg, length);
405}
406
407int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
408{
409 struct ap_queue_status status;
410
411 status = __ap_send(qid, psmid, msg, length, 0);
412 switch (status.response_code) {
413 case AP_RESPONSE_NORMAL:
414 return 0;
415 case AP_RESPONSE_Q_FULL:
416 case AP_RESPONSE_RESET_IN_PROGRESS:
417 return -EBUSY;
418 case AP_RESPONSE_REQ_FAC_NOT_INST:
419 return -EINVAL;
420 default: /* Device is gone. */
421 return -ENODEV;
422 }
423}
424EXPORT_SYMBOL(ap_send);
425
426/**
427 * __ap_recv(): Receive message from adjunct processor queue.
428 * @qid: The AP queue number
429 * @psmid: Pointer to program supplied message identifier
430 * @msg: The message text
431 * @length: The message length
432 *
433 * Returns AP queue status structure.
434 * Condition code 1 on DQAP means the receive has taken place
435 * but only partially. The response is incomplete, hence the
436 * DQAP is repeated.
437 * Condition code 2 on DQAP also means the receive is incomplete,
438 * this time because a segment boundary was reached. Again, the
439 * DQAP is repeated.
440 * Note that gpr2 is used by the DQAP instruction to keep track of
441 * any 'residual' length, in case the instruction gets interrupted.
442 * Hence it gets zeroed before the instruction.
443 */
444static inline struct ap_queue_status
445__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
446{
447 typedef struct { char _[length]; } msgblock;
448 register unsigned long reg0 asm("0") = qid | 0x80000000UL;
449 register struct ap_queue_status reg1 asm ("1");
450 register unsigned long reg2 asm("2") = 0UL;
451 register unsigned long reg4 asm("4") = (unsigned long) msg;
452 register unsigned long reg5 asm("5") = (unsigned long) length;
453 register unsigned long reg6 asm("6") = 0UL;
454 register unsigned long reg7 asm("7") = 0UL;
455
456
457 asm volatile(
458 "0: .long 0xb2ae0064\n" /* DQAP */
459 " brc 6,0b\n"
460 : "+d" (reg0), "=d" (reg1), "+d" (reg2),
461 "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
462 "=m" (*(msgblock *) msg) : : "cc" );
463 *psmid = (((unsigned long long) reg6) << 32) + reg7;
464 return reg1;
465}
466
467int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
468{
469 struct ap_queue_status status;
470
471 if (msg == NULL)
472 return -EINVAL;
473 status = __ap_recv(qid, psmid, msg, length);
474 switch (status.response_code) {
475 case AP_RESPONSE_NORMAL:
476 return 0;
477 case AP_RESPONSE_NO_PENDING_REPLY:
478 if (status.queue_empty)
479 return -ENOENT;
480 return -EBUSY;
481 case AP_RESPONSE_RESET_IN_PROGRESS:
482 return -EBUSY;
483 default:
484 return -ENODEV;
485 }
486}
487EXPORT_SYMBOL(ap_recv);
488
489/**
490 * ap_query_queue(): Check if an AP queue is available. 258 * ap_query_queue(): Check if an AP queue is available.
491 * @qid: The AP queue number 259 * @qid: The AP queue number
492 * @queue_depth: Pointer to queue depth value 260 * @queue_depth: Pointer to queue depth value
@@ -500,7 +268,7 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
500 unsigned long info; 268 unsigned long info;
501 int nd; 269 int nd;
502 270
503 if (!ap_test_config_card_id(AP_QID_DEVICE(qid))) 271 if (!ap_test_config_card_id(AP_QID_CARD(qid)))
504 return -ENODEV; 272 return -ENODEV;
505 273
506 status = ap_test_queue(qid, &info); 274 status = ap_test_queue(qid, &info);
@@ -511,8 +279,28 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
511 *facilities = (unsigned int)(info >> 32); 279 *facilities = (unsigned int)(info >> 32);
512 /* Update maximum domain id */ 280 /* Update maximum domain id */
513 nd = (info >> 16) & 0xff; 281 nd = (info >> 16) & 0xff;
282 /* if N bit is available, z13 and newer */
514 if ((info & (1UL << 57)) && nd > 0) 283 if ((info & (1UL << 57)) && nd > 0)
515 ap_max_domain_id = nd; 284 ap_max_domain_id = nd;
285 else /* older machine types */
286 ap_max_domain_id = 15;
287 switch (*device_type) {
288 /* For CEX2 and CEX3 the available functions
289 * are not refrected by the facilities bits.
290 * Instead it is coded into the type. So here
291 * modify the function bits based on the type.
292 */
293 case AP_DEVICE_TYPE_CEX2A:
294 case AP_DEVICE_TYPE_CEX3A:
295 *facilities |= 0x08000000;
296 break;
297 case AP_DEVICE_TYPE_CEX2C:
298 case AP_DEVICE_TYPE_CEX3C:
299 *facilities |= 0x10000000;
300 break;
301 default:
302 break;
303 }
516 return 0; 304 return 0;
517 case AP_RESPONSE_Q_NOT_AVAIL: 305 case AP_RESPONSE_Q_NOT_AVAIL:
518 case AP_RESPONSE_DECONFIGURED: 306 case AP_RESPONSE_DECONFIGURED:
@@ -528,9 +316,7 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
528 } 316 }
529} 317}
530 318
531/* State machine definitions and helpers */ 319void ap_wait(enum ap_wait wait)
532
533static void ap_sm_wait(enum ap_wait wait)
534{ 320{
535 ktime_t hr_time; 321 ktime_t hr_time;
536 322
@@ -559,350 +345,21 @@ static void ap_sm_wait(enum ap_wait wait)
559 } 345 }
560} 346}
561 347
562static enum ap_wait ap_sm_nop(struct ap_device *ap_dev)
563{
564 return AP_WAIT_NONE;
565}
566
567/**
568 * ap_sm_recv(): Receive pending reply messages from an AP device but do
569 * not change the state of the device.
570 * @ap_dev: pointer to the AP device
571 *
572 * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
573 */
574static struct ap_queue_status ap_sm_recv(struct ap_device *ap_dev)
575{
576 struct ap_queue_status status;
577 struct ap_message *ap_msg;
578
579 status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
580 ap_dev->reply->message, ap_dev->reply->length);
581 switch (status.response_code) {
582 case AP_RESPONSE_NORMAL:
583 atomic_dec(&ap_poll_requests);
584 ap_dev->queue_count--;
585 if (ap_dev->queue_count > 0)
586 mod_timer(&ap_dev->timeout,
587 jiffies + ap_dev->drv->request_timeout);
588 list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
589 if (ap_msg->psmid != ap_dev->reply->psmid)
590 continue;
591 list_del_init(&ap_msg->list);
592 ap_dev->pendingq_count--;
593 ap_msg->receive(ap_dev, ap_msg, ap_dev->reply);
594 break;
595 }
596 case AP_RESPONSE_NO_PENDING_REPLY:
597 if (!status.queue_empty || ap_dev->queue_count <= 0)
598 break;
599 /* The card shouldn't forget requests but who knows. */
600 atomic_sub(ap_dev->queue_count, &ap_poll_requests);
601 ap_dev->queue_count = 0;
602 list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
603 ap_dev->requestq_count += ap_dev->pendingq_count;
604 ap_dev->pendingq_count = 0;
605 break;
606 default:
607 break;
608 }
609 return status;
610}
611
612/**
613 * ap_sm_read(): Receive pending reply messages from an AP device.
614 * @ap_dev: pointer to the AP device
615 *
616 * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
617 */
618static enum ap_wait ap_sm_read(struct ap_device *ap_dev)
619{
620 struct ap_queue_status status;
621
622 if (!ap_dev->reply)
623 return AP_WAIT_NONE;
624 status = ap_sm_recv(ap_dev);
625 switch (status.response_code) {
626 case AP_RESPONSE_NORMAL:
627 if (ap_dev->queue_count > 0) {
628 ap_dev->state = AP_STATE_WORKING;
629 return AP_WAIT_AGAIN;
630 }
631 ap_dev->state = AP_STATE_IDLE;
632 return AP_WAIT_NONE;
633 case AP_RESPONSE_NO_PENDING_REPLY:
634 if (ap_dev->queue_count > 0)
635 return AP_WAIT_INTERRUPT;
636 ap_dev->state = AP_STATE_IDLE;
637 return AP_WAIT_NONE;
638 default:
639 ap_dev->state = AP_STATE_BORKED;
640 return AP_WAIT_NONE;
641 }
642}
643
644/**
645 * ap_sm_suspend_read(): Receive pending reply messages from an AP device
646 * without changing the device state in between. In suspend mode we don't
647 * allow sending new requests, therefore just fetch pending replies.
648 * @ap_dev: pointer to the AP device
649 *
650 * Returns AP_WAIT_NONE or AP_WAIT_AGAIN
651 */
652static enum ap_wait ap_sm_suspend_read(struct ap_device *ap_dev)
653{
654 struct ap_queue_status status;
655
656 if (!ap_dev->reply)
657 return AP_WAIT_NONE;
658 status = ap_sm_recv(ap_dev);
659 switch (status.response_code) {
660 case AP_RESPONSE_NORMAL:
661 if (ap_dev->queue_count > 0)
662 return AP_WAIT_AGAIN;
663 /* fall through */
664 default:
665 return AP_WAIT_NONE;
666 }
667}
668
669/**
670 * ap_sm_write(): Send messages from the request queue to an AP device.
671 * @ap_dev: pointer to the AP device
672 *
673 * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
674 */
675static enum ap_wait ap_sm_write(struct ap_device *ap_dev)
676{
677 struct ap_queue_status status;
678 struct ap_message *ap_msg;
679
680 if (ap_dev->requestq_count <= 0)
681 return AP_WAIT_NONE;
682 /* Start the next request on the queue. */
683 ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
684 status = __ap_send(ap_dev->qid, ap_msg->psmid,
685 ap_msg->message, ap_msg->length, ap_msg->special);
686 switch (status.response_code) {
687 case AP_RESPONSE_NORMAL:
688 atomic_inc(&ap_poll_requests);
689 ap_dev->queue_count++;
690 if (ap_dev->queue_count == 1)
691 mod_timer(&ap_dev->timeout,
692 jiffies + ap_dev->drv->request_timeout);
693 list_move_tail(&ap_msg->list, &ap_dev->pendingq);
694 ap_dev->requestq_count--;
695 ap_dev->pendingq_count++;
696 if (ap_dev->queue_count < ap_dev->queue_depth) {
697 ap_dev->state = AP_STATE_WORKING;
698 return AP_WAIT_AGAIN;
699 }
700 /* fall through */
701 case AP_RESPONSE_Q_FULL:
702 ap_dev->state = AP_STATE_QUEUE_FULL;
703 return AP_WAIT_INTERRUPT;
704 case AP_RESPONSE_RESET_IN_PROGRESS:
705 ap_dev->state = AP_STATE_RESET_WAIT;
706 return AP_WAIT_TIMEOUT;
707 case AP_RESPONSE_MESSAGE_TOO_BIG:
708 case AP_RESPONSE_REQ_FAC_NOT_INST:
709 list_del_init(&ap_msg->list);
710 ap_dev->requestq_count--;
711 ap_msg->rc = -EINVAL;
712 ap_msg->receive(ap_dev, ap_msg, NULL);
713 return AP_WAIT_AGAIN;
714 default:
715 ap_dev->state = AP_STATE_BORKED;
716 return AP_WAIT_NONE;
717 }
718}
719
720/**
721 * ap_sm_read_write(): Send and receive messages to/from an AP device.
722 * @ap_dev: pointer to the AP device
723 *
724 * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
725 */
726static enum ap_wait ap_sm_read_write(struct ap_device *ap_dev)
727{
728 return min(ap_sm_read(ap_dev), ap_sm_write(ap_dev));
729}
730
731/**
732 * ap_sm_reset(): Reset an AP queue.
733 * @qid: The AP queue number
734 *
735 * Submit the Reset command to an AP queue.
736 */
737static enum ap_wait ap_sm_reset(struct ap_device *ap_dev)
738{
739 struct ap_queue_status status;
740
741 status = ap_reset_queue(ap_dev->qid);
742 switch (status.response_code) {
743 case AP_RESPONSE_NORMAL:
744 case AP_RESPONSE_RESET_IN_PROGRESS:
745 ap_dev->state = AP_STATE_RESET_WAIT;
746 ap_dev->interrupt = AP_INTR_DISABLED;
747 return AP_WAIT_TIMEOUT;
748 case AP_RESPONSE_BUSY:
749 return AP_WAIT_TIMEOUT;
750 case AP_RESPONSE_Q_NOT_AVAIL:
751 case AP_RESPONSE_DECONFIGURED:
752 case AP_RESPONSE_CHECKSTOPPED:
753 default:
754 ap_dev->state = AP_STATE_BORKED;
755 return AP_WAIT_NONE;
756 }
757}
758
759/**
760 * ap_sm_reset_wait(): Test queue for completion of the reset operation
761 * @ap_dev: pointer to the AP device
762 *
763 * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
764 */
765static enum ap_wait ap_sm_reset_wait(struct ap_device *ap_dev)
766{
767 struct ap_queue_status status;
768 unsigned long info;
769
770 if (ap_dev->queue_count > 0 && ap_dev->reply)
771 /* Try to read a completed message and get the status */
772 status = ap_sm_recv(ap_dev);
773 else
774 /* Get the status with TAPQ */
775 status = ap_test_queue(ap_dev->qid, &info);
776
777 switch (status.response_code) {
778 case AP_RESPONSE_NORMAL:
779 if (ap_using_interrupts() &&
780 ap_queue_enable_interruption(ap_dev,
781 ap_airq.lsi_ptr) == 0)
782 ap_dev->state = AP_STATE_SETIRQ_WAIT;
783 else
784 ap_dev->state = (ap_dev->queue_count > 0) ?
785 AP_STATE_WORKING : AP_STATE_IDLE;
786 return AP_WAIT_AGAIN;
787 case AP_RESPONSE_BUSY:
788 case AP_RESPONSE_RESET_IN_PROGRESS:
789 return AP_WAIT_TIMEOUT;
790 case AP_RESPONSE_Q_NOT_AVAIL:
791 case AP_RESPONSE_DECONFIGURED:
792 case AP_RESPONSE_CHECKSTOPPED:
793 default:
794 ap_dev->state = AP_STATE_BORKED;
795 return AP_WAIT_NONE;
796 }
797}
798
799/**
800 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
801 * @ap_dev: pointer to the AP device
802 *
803 * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
804 */
805static enum ap_wait ap_sm_setirq_wait(struct ap_device *ap_dev)
806{
807 struct ap_queue_status status;
808 unsigned long info;
809
810 if (ap_dev->queue_count > 0 && ap_dev->reply)
811 /* Try to read a completed message and get the status */
812 status = ap_sm_recv(ap_dev);
813 else
814 /* Get the status with TAPQ */
815 status = ap_test_queue(ap_dev->qid, &info);
816
817 if (status.int_enabled == 1) {
818 /* Irqs are now enabled */
819 ap_dev->interrupt = AP_INTR_ENABLED;
820 ap_dev->state = (ap_dev->queue_count > 0) ?
821 AP_STATE_WORKING : AP_STATE_IDLE;
822 }
823
824 switch (status.response_code) {
825 case AP_RESPONSE_NORMAL:
826 if (ap_dev->queue_count > 0)
827 return AP_WAIT_AGAIN;
828 /* fallthrough */
829 case AP_RESPONSE_NO_PENDING_REPLY:
830 return AP_WAIT_TIMEOUT;
831 default:
832 ap_dev->state = AP_STATE_BORKED;
833 return AP_WAIT_NONE;
834 }
835}
836
837/*
838 * AP state machine jump table
839 */
840static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
841 [AP_STATE_RESET_START] = {
842 [AP_EVENT_POLL] = ap_sm_reset,
843 [AP_EVENT_TIMEOUT] = ap_sm_nop,
844 },
845 [AP_STATE_RESET_WAIT] = {
846 [AP_EVENT_POLL] = ap_sm_reset_wait,
847 [AP_EVENT_TIMEOUT] = ap_sm_nop,
848 },
849 [AP_STATE_SETIRQ_WAIT] = {
850 [AP_EVENT_POLL] = ap_sm_setirq_wait,
851 [AP_EVENT_TIMEOUT] = ap_sm_nop,
852 },
853 [AP_STATE_IDLE] = {
854 [AP_EVENT_POLL] = ap_sm_write,
855 [AP_EVENT_TIMEOUT] = ap_sm_nop,
856 },
857 [AP_STATE_WORKING] = {
858 [AP_EVENT_POLL] = ap_sm_read_write,
859 [AP_EVENT_TIMEOUT] = ap_sm_reset,
860 },
861 [AP_STATE_QUEUE_FULL] = {
862 [AP_EVENT_POLL] = ap_sm_read,
863 [AP_EVENT_TIMEOUT] = ap_sm_reset,
864 },
865 [AP_STATE_SUSPEND_WAIT] = {
866 [AP_EVENT_POLL] = ap_sm_suspend_read,
867 [AP_EVENT_TIMEOUT] = ap_sm_nop,
868 },
869 [AP_STATE_BORKED] = {
870 [AP_EVENT_POLL] = ap_sm_nop,
871 [AP_EVENT_TIMEOUT] = ap_sm_nop,
872 },
873};
874
875static inline enum ap_wait ap_sm_event(struct ap_device *ap_dev,
876 enum ap_event event)
877{
878 return ap_jumptable[ap_dev->state][event](ap_dev);
879}
880
881static inline enum ap_wait ap_sm_event_loop(struct ap_device *ap_dev,
882 enum ap_event event)
883{
884 enum ap_wait wait;
885
886 while ((wait = ap_sm_event(ap_dev, event)) == AP_WAIT_AGAIN)
887 ;
888 return wait;
889}
890
891/** 348/**
892 * ap_request_timeout(): Handling of request timeouts 349 * ap_request_timeout(): Handling of request timeouts
893 * @data: Holds the AP device. 350 * @data: Holds the AP device.
894 * 351 *
895 * Handles request timeouts. 352 * Handles request timeouts.
896 */ 353 */
897static void ap_request_timeout(unsigned long data) 354void ap_request_timeout(unsigned long data)
898{ 355{
899 struct ap_device *ap_dev = (struct ap_device *) data; 356 struct ap_queue *aq = (struct ap_queue *) data;
900 357
901 if (ap_suspend_flag) 358 if (ap_suspend_flag)
902 return; 359 return;
903 spin_lock_bh(&ap_dev->lock); 360 spin_lock_bh(&aq->lock);
904 ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_TIMEOUT)); 361 ap_wait(ap_sm_event(aq, AP_EVENT_TIMEOUT));
905 spin_unlock_bh(&ap_dev->lock); 362 spin_unlock_bh(&aq->lock);
906} 363}
907 364
908/** 365/**
@@ -937,7 +394,8 @@ static void ap_interrupt_handler(struct airq_struct *airq)
937 */ 394 */
938static void ap_tasklet_fn(unsigned long dummy) 395static void ap_tasklet_fn(unsigned long dummy)
939{ 396{
940 struct ap_device *ap_dev; 397 struct ap_card *ac;
398 struct ap_queue *aq;
941 enum ap_wait wait = AP_WAIT_NONE; 399 enum ap_wait wait = AP_WAIT_NONE;
942 400
943 /* Reset the indicator if interrupts are used. Thus new interrupts can 401 /* Reset the indicator if interrupts are used. Thus new interrupts can
@@ -947,14 +405,35 @@ static void ap_tasklet_fn(unsigned long dummy)
947 if (ap_using_interrupts()) 405 if (ap_using_interrupts())
948 xchg(ap_airq.lsi_ptr, 0); 406 xchg(ap_airq.lsi_ptr, 0);
949 407
950 spin_lock(&ap_device_list_lock); 408 spin_lock_bh(&ap_list_lock);
951 list_for_each_entry(ap_dev, &ap_device_list, list) { 409 for_each_ap_card(ac) {
952 spin_lock_bh(&ap_dev->lock); 410 for_each_ap_queue(aq, ac) {
953 wait = min(wait, ap_sm_event_loop(ap_dev, AP_EVENT_POLL)); 411 spin_lock_bh(&aq->lock);
954 spin_unlock_bh(&ap_dev->lock); 412 wait = min(wait, ap_sm_event_loop(aq, AP_EVENT_POLL));
413 spin_unlock_bh(&aq->lock);
414 }
955 } 415 }
956 spin_unlock(&ap_device_list_lock); 416 spin_unlock_bh(&ap_list_lock);
957 ap_sm_wait(wait); 417
418 ap_wait(wait);
419}
420
421static int ap_pending_requests(void)
422{
423 struct ap_card *ac;
424 struct ap_queue *aq;
425
426 spin_lock_bh(&ap_list_lock);
427 for_each_ap_card(ac) {
428 for_each_ap_queue(aq, ac) {
429 if (aq->queue_count == 0)
430 continue;
431 spin_unlock_bh(&ap_list_lock);
432 return 1;
433 }
434 }
435 spin_unlock_bh(&ap_list_lock);
436 return 0;
958} 437}
959 438
960/** 439/**
@@ -976,8 +455,7 @@ static int ap_poll_thread(void *data)
976 while (!kthread_should_stop()) { 455 while (!kthread_should_stop()) {
977 add_wait_queue(&ap_poll_wait, &wait); 456 add_wait_queue(&ap_poll_wait, &wait);
978 set_current_state(TASK_INTERRUPTIBLE); 457 set_current_state(TASK_INTERRUPTIBLE);
979 if (ap_suspend_flag || 458 if (ap_suspend_flag || !ap_pending_requests()) {
980 atomic_read(&ap_poll_requests) <= 0) {
981 schedule(); 459 schedule();
982 try_to_freeze(); 460 try_to_freeze();
983 } 461 }
@@ -989,7 +467,8 @@ static int ap_poll_thread(void *data)
989 continue; 467 continue;
990 } 468 }
991 ap_tasklet_fn(0); 469 ap_tasklet_fn(0);
992 } while (!kthread_should_stop()); 470 }
471
993 return 0; 472 return 0;
994} 473}
995 474
@@ -1018,207 +497,8 @@ static void ap_poll_thread_stop(void)
1018 mutex_unlock(&ap_poll_thread_mutex); 497 mutex_unlock(&ap_poll_thread_mutex);
1019} 498}
1020 499
1021/** 500#define is_card_dev(x) ((x)->parent == ap_root_device)
1022 * ap_queue_message(): Queue a request to an AP device. 501#define is_queue_dev(x) ((x)->parent != ap_root_device)
1023 * @ap_dev: The AP device to queue the message to
1024 * @ap_msg: The message that is to be added
1025 */
1026void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1027{
1028 /* For asynchronous message handling a valid receive-callback
1029 * is required. */
1030 BUG_ON(!ap_msg->receive);
1031
1032 spin_lock_bh(&ap_dev->lock);
1033 /* Queue the message. */
1034 list_add_tail(&ap_msg->list, &ap_dev->requestq);
1035 ap_dev->requestq_count++;
1036 ap_dev->total_request_count++;
1037 /* Send/receive as many request from the queue as possible. */
1038 ap_sm_wait(ap_sm_event_loop(ap_dev, AP_EVENT_POLL));
1039 spin_unlock_bh(&ap_dev->lock);
1040}
1041EXPORT_SYMBOL(ap_queue_message);
1042
1043/**
1044 * ap_cancel_message(): Cancel a crypto request.
1045 * @ap_dev: The AP device that has the message queued
1046 * @ap_msg: The message that is to be removed
1047 *
1048 * Cancel a crypto request. This is done by removing the request
1049 * from the device pending or request queue. Note that the
1050 * request stays on the AP queue. When it finishes the message
1051 * reply will be discarded because the psmid can't be found.
1052 */
1053void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1054{
1055 struct ap_message *tmp;
1056
1057 spin_lock_bh(&ap_dev->lock);
1058 if (!list_empty(&ap_msg->list)) {
1059 list_for_each_entry(tmp, &ap_dev->pendingq, list)
1060 if (tmp->psmid == ap_msg->psmid) {
1061 ap_dev->pendingq_count--;
1062 goto found;
1063 }
1064 ap_dev->requestq_count--;
1065found:
1066 list_del_init(&ap_msg->list);
1067 }
1068 spin_unlock_bh(&ap_dev->lock);
1069}
1070EXPORT_SYMBOL(ap_cancel_message);
1071
1072/*
1073 * AP device related attributes.
1074 */
1075static ssize_t ap_hwtype_show(struct device *dev,
1076 struct device_attribute *attr, char *buf)
1077{
1078 struct ap_device *ap_dev = to_ap_dev(dev);
1079 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
1080}
1081
1082static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
1083
1084static ssize_t ap_raw_hwtype_show(struct device *dev,
1085 struct device_attribute *attr, char *buf)
1086{
1087 struct ap_device *ap_dev = to_ap_dev(dev);
1088
1089 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->raw_hwtype);
1090}
1091
1092static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL);
1093
1094static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
1095 char *buf)
1096{
1097 struct ap_device *ap_dev = to_ap_dev(dev);
1098 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
1099}
1100
1101static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
1102static ssize_t ap_request_count_show(struct device *dev,
1103 struct device_attribute *attr,
1104 char *buf)
1105{
1106 struct ap_device *ap_dev = to_ap_dev(dev);
1107 int rc;
1108
1109 spin_lock_bh(&ap_dev->lock);
1110 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
1111 spin_unlock_bh(&ap_dev->lock);
1112 return rc;
1113}
1114
1115static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
1116
1117static ssize_t ap_requestq_count_show(struct device *dev,
1118 struct device_attribute *attr, char *buf)
1119{
1120 struct ap_device *ap_dev = to_ap_dev(dev);
1121 int rc;
1122
1123 spin_lock_bh(&ap_dev->lock);
1124 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->requestq_count);
1125 spin_unlock_bh(&ap_dev->lock);
1126 return rc;
1127}
1128
1129static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
1130
1131static ssize_t ap_pendingq_count_show(struct device *dev,
1132 struct device_attribute *attr, char *buf)
1133{
1134 struct ap_device *ap_dev = to_ap_dev(dev);
1135 int rc;
1136
1137 spin_lock_bh(&ap_dev->lock);
1138 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->pendingq_count);
1139 spin_unlock_bh(&ap_dev->lock);
1140 return rc;
1141}
1142
1143static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
1144
1145static ssize_t ap_reset_show(struct device *dev,
1146 struct device_attribute *attr, char *buf)
1147{
1148 struct ap_device *ap_dev = to_ap_dev(dev);
1149 int rc = 0;
1150
1151 spin_lock_bh(&ap_dev->lock);
1152 switch (ap_dev->state) {
1153 case AP_STATE_RESET_START:
1154 case AP_STATE_RESET_WAIT:
1155 rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n");
1156 break;
1157 case AP_STATE_WORKING:
1158 case AP_STATE_QUEUE_FULL:
1159 rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
1160 break;
1161 default:
1162 rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
1163 }
1164 spin_unlock_bh(&ap_dev->lock);
1165 return rc;
1166}
1167
1168static DEVICE_ATTR(reset, 0444, ap_reset_show, NULL);
1169
1170static ssize_t ap_interrupt_show(struct device *dev,
1171 struct device_attribute *attr, char *buf)
1172{
1173 struct ap_device *ap_dev = to_ap_dev(dev);
1174 int rc = 0;
1175
1176 spin_lock_bh(&ap_dev->lock);
1177 if (ap_dev->state == AP_STATE_SETIRQ_WAIT)
1178 rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
1179 else if (ap_dev->interrupt == AP_INTR_ENABLED)
1180 rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
1181 else
1182 rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
1183 spin_unlock_bh(&ap_dev->lock);
1184 return rc;
1185}
1186
1187static DEVICE_ATTR(interrupt, 0444, ap_interrupt_show, NULL);
1188
1189static ssize_t ap_modalias_show(struct device *dev,
1190 struct device_attribute *attr, char *buf)
1191{
1192 return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
1193}
1194
1195static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
1196
1197static ssize_t ap_functions_show(struct device *dev,
1198 struct device_attribute *attr, char *buf)
1199{
1200 struct ap_device *ap_dev = to_ap_dev(dev);
1201 return snprintf(buf, PAGE_SIZE, "0x%08X\n", ap_dev->functions);
1202}
1203
1204static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
1205
1206static struct attribute *ap_dev_attrs[] = {
1207 &dev_attr_hwtype.attr,
1208 &dev_attr_raw_hwtype.attr,
1209 &dev_attr_depth.attr,
1210 &dev_attr_request_count.attr,
1211 &dev_attr_requestq_count.attr,
1212 &dev_attr_pendingq_count.attr,
1213 &dev_attr_reset.attr,
1214 &dev_attr_interrupt.attr,
1215 &dev_attr_modalias.attr,
1216 &dev_attr_ap_functions.attr,
1217 NULL
1218};
1219static struct attribute_group ap_dev_attr_group = {
1220 .attrs = ap_dev_attrs
1221};
1222 502
1223/** 503/**
1224 * ap_bus_match() 504 * ap_bus_match()
@@ -1229,7 +509,6 @@ static struct attribute_group ap_dev_attr_group = {
1229 */ 509 */
1230static int ap_bus_match(struct device *dev, struct device_driver *drv) 510static int ap_bus_match(struct device *dev, struct device_driver *drv)
1231{ 511{
1232 struct ap_device *ap_dev = to_ap_dev(dev);
1233 struct ap_driver *ap_drv = to_ap_drv(drv); 512 struct ap_driver *ap_drv = to_ap_drv(drv);
1234 struct ap_device_id *id; 513 struct ap_device_id *id;
1235 514
@@ -1238,10 +517,14 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv)
1238 * supported types of the device_driver. 517 * supported types of the device_driver.
1239 */ 518 */
1240 for (id = ap_drv->ids; id->match_flags; id++) { 519 for (id = ap_drv->ids; id->match_flags; id++) {
1241 if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) && 520 if (is_card_dev(dev) &&
1242 (id->dev_type != ap_dev->device_type)) 521 id->match_flags & AP_DEVICE_ID_MATCH_CARD_TYPE &&
1243 continue; 522 id->dev_type == to_ap_dev(dev)->device_type)
1244 return 1; 523 return 1;
524 if (is_queue_dev(dev) &&
525 id->match_flags & AP_DEVICE_ID_MATCH_QUEUE_TYPE &&
526 id->dev_type == to_ap_dev(dev)->device_type)
527 return 1;
1245 } 528 }
1246 return 0; 529 return 0;
1247} 530}
@@ -1277,18 +560,24 @@ static int ap_dev_suspend(struct device *dev)
1277{ 560{
1278 struct ap_device *ap_dev = to_ap_dev(dev); 561 struct ap_device *ap_dev = to_ap_dev(dev);
1279 562
1280 /* Poll on the device until all requests are finished. */ 563 if (ap_dev->drv && ap_dev->drv->suspend)
1281 spin_lock_bh(&ap_dev->lock); 564 ap_dev->drv->suspend(ap_dev);
1282 ap_dev->state = AP_STATE_SUSPEND_WAIT; 565 return 0;
1283 while (ap_sm_event(ap_dev, AP_EVENT_POLL) != AP_WAIT_NONE) 566}
1284 ; 567
1285 ap_dev->state = AP_STATE_BORKED; 568static int ap_dev_resume(struct device *dev)
1286 spin_unlock_bh(&ap_dev->lock); 569{
570 struct ap_device *ap_dev = to_ap_dev(dev);
571
572 if (ap_dev->drv && ap_dev->drv->resume)
573 ap_dev->drv->resume(ap_dev);
1287 return 0; 574 return 0;
1288} 575}
1289 576
1290static void ap_bus_suspend(void) 577static void ap_bus_suspend(void)
1291{ 578{
579 AP_DBF(DBF_DEBUG, "ap_bus_suspend running\n");
580
1292 ap_suspend_flag = 1; 581 ap_suspend_flag = 1;
1293 /* 582 /*
1294 * Disable scanning for devices, thus we do not want to scan 583 * Disable scanning for devices, thus we do not want to scan
@@ -1298,9 +587,25 @@ static void ap_bus_suspend(void)
1298 tasklet_disable(&ap_tasklet); 587 tasklet_disable(&ap_tasklet);
1299} 588}
1300 589
1301static int __ap_devices_unregister(struct device *dev, void *dummy) 590static int __ap_card_devices_unregister(struct device *dev, void *dummy)
591{
592 if (is_card_dev(dev))
593 device_unregister(dev);
594 return 0;
595}
596
597static int __ap_queue_devices_unregister(struct device *dev, void *dummy)
1302{ 598{
1303 device_unregister(dev); 599 if (is_queue_dev(dev))
600 device_unregister(dev);
601 return 0;
602}
603
604static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
605{
606 if (is_queue_dev(dev) &&
607 AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long) data)
608 device_unregister(dev);
1304 return 0; 609 return 0;
1305} 610}
1306 611
@@ -1308,8 +613,15 @@ static void ap_bus_resume(void)
1308{ 613{
1309 int rc; 614 int rc;
1310 615
1311 /* Unconditionally remove all AP devices */ 616 AP_DBF(DBF_DEBUG, "ap_bus_resume running\n");
1312 bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_devices_unregister); 617
618 /* remove all queue devices */
619 bus_for_each_dev(&ap_bus_type, NULL, NULL,
620 __ap_queue_devices_unregister);
621 /* remove all card devices */
622 bus_for_each_dev(&ap_bus_type, NULL, NULL,
623 __ap_card_devices_unregister);
624
1313 /* Reset thin interrupt setting */ 625 /* Reset thin interrupt setting */
1314 if (ap_interrupts_available() && !ap_using_interrupts()) { 626 if (ap_interrupts_available() && !ap_using_interrupts()) {
1315 rc = register_adapter_interrupt(&ap_airq); 627 rc = register_adapter_interrupt(&ap_airq);
@@ -1351,7 +663,7 @@ static struct notifier_block ap_power_notifier = {
1351 .notifier_call = ap_power_event, 663 .notifier_call = ap_power_event,
1352}; 664};
1353 665
1354static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, NULL); 666static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, ap_dev_resume);
1355 667
1356static struct bus_type ap_bus_type = { 668static struct bus_type ap_bus_type = {
1357 .name = "ap", 669 .name = "ap",
@@ -1360,17 +672,6 @@ static struct bus_type ap_bus_type = {
1360 .pm = &ap_bus_pm_ops, 672 .pm = &ap_bus_pm_ops,
1361}; 673};
1362 674
1363void ap_device_init_reply(struct ap_device *ap_dev,
1364 struct ap_message *reply)
1365{
1366 ap_dev->reply = reply;
1367
1368 spin_lock_bh(&ap_dev->lock);
1369 ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_POLL));
1370 spin_unlock_bh(&ap_dev->lock);
1371}
1372EXPORT_SYMBOL(ap_device_init_reply);
1373
1374static int ap_device_probe(struct device *dev) 675static int ap_device_probe(struct device *dev)
1375{ 676{
1376 struct ap_device *ap_dev = to_ap_dev(dev); 677 struct ap_device *ap_dev = to_ap_dev(dev);
@@ -1384,61 +685,22 @@ static int ap_device_probe(struct device *dev)
1384 return rc; 685 return rc;
1385} 686}
1386 687
1387/**
1388 * __ap_flush_queue(): Flush requests.
1389 * @ap_dev: Pointer to the AP device
1390 *
1391 * Flush all requests from the request/pending queue of an AP device.
1392 */
1393static void __ap_flush_queue(struct ap_device *ap_dev)
1394{
1395 struct ap_message *ap_msg, *next;
1396
1397 list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
1398 list_del_init(&ap_msg->list);
1399 ap_dev->pendingq_count--;
1400 ap_msg->rc = -EAGAIN;
1401 ap_msg->receive(ap_dev, ap_msg, NULL);
1402 }
1403 list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
1404 list_del_init(&ap_msg->list);
1405 ap_dev->requestq_count--;
1406 ap_msg->rc = -EAGAIN;
1407 ap_msg->receive(ap_dev, ap_msg, NULL);
1408 }
1409}
1410
1411void ap_flush_queue(struct ap_device *ap_dev)
1412{
1413 spin_lock_bh(&ap_dev->lock);
1414 __ap_flush_queue(ap_dev);
1415 spin_unlock_bh(&ap_dev->lock);
1416}
1417EXPORT_SYMBOL(ap_flush_queue);
1418
1419static int ap_device_remove(struct device *dev) 688static int ap_device_remove(struct device *dev)
1420{ 689{
1421 struct ap_device *ap_dev = to_ap_dev(dev); 690 struct ap_device *ap_dev = to_ap_dev(dev);
1422 struct ap_driver *ap_drv = ap_dev->drv; 691 struct ap_driver *ap_drv = ap_dev->drv;
1423 692
1424 ap_flush_queue(ap_dev); 693 spin_lock_bh(&ap_list_lock);
1425 del_timer_sync(&ap_dev->timeout); 694 if (is_card_dev(dev))
1426 spin_lock_bh(&ap_device_list_lock); 695 list_del_init(&to_ap_card(dev)->list);
1427 list_del_init(&ap_dev->list); 696 else
1428 spin_unlock_bh(&ap_device_list_lock); 697 list_del_init(&to_ap_queue(dev)->list);
698 spin_unlock_bh(&ap_list_lock);
1429 if (ap_drv->remove) 699 if (ap_drv->remove)
1430 ap_drv->remove(ap_dev); 700 ap_drv->remove(ap_dev);
1431 spin_lock_bh(&ap_dev->lock);
1432 atomic_sub(ap_dev->queue_count, &ap_poll_requests);
1433 spin_unlock_bh(&ap_dev->lock);
1434 return 0; 701 return 0;
1435} 702}
1436 703
1437static void ap_device_release(struct device *dev)
1438{
1439 kfree(to_ap_dev(dev));
1440}
1441
1442int ap_driver_register(struct ap_driver *ap_drv, struct module *owner, 704int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
1443 char *name) 705 char *name)
1444{ 706{
@@ -1481,18 +743,30 @@ static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
1481 return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index); 743 return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
1482} 744}
1483 745
1484static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL); 746static ssize_t ap_domain_store(struct bus_type *bus,
747 const char *buf, size_t count)
748{
749 int domain;
750
751 if (sscanf(buf, "%i\n", &domain) != 1 ||
752 domain < 0 || domain > ap_max_domain_id)
753 return -EINVAL;
754 spin_lock_bh(&ap_domain_lock);
755 ap_domain_index = domain;
756 spin_unlock_bh(&ap_domain_lock);
757
758 AP_DBF(DBF_DEBUG, "store new default domain=%d\n", domain);
759
760 return count;
761}
762
763static BUS_ATTR(ap_domain, 0644, ap_domain_show, ap_domain_store);
1485 764
1486static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf) 765static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
1487{ 766{
1488 if (!ap_configuration) /* QCI not supported */ 767 if (!ap_configuration) /* QCI not supported */
1489 return snprintf(buf, PAGE_SIZE, "not supported\n"); 768 return snprintf(buf, PAGE_SIZE, "not supported\n");
1490 if (!test_facility(76)) 769
1491 /* format 0 - 16 bit domain field */
1492 return snprintf(buf, PAGE_SIZE, "%08x%08x\n",
1493 ap_configuration->adm[0],
1494 ap_configuration->adm[1]);
1495 /* format 1 - 256 bit domain field */
1496 return snprintf(buf, PAGE_SIZE, 770 return snprintf(buf, PAGE_SIZE,
1497 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", 771 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
1498 ap_configuration->adm[0], ap_configuration->adm[1], 772 ap_configuration->adm[0], ap_configuration->adm[1],
@@ -1504,6 +778,22 @@ static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
1504static BUS_ATTR(ap_control_domain_mask, 0444, 778static BUS_ATTR(ap_control_domain_mask, 0444,
1505 ap_control_domain_mask_show, NULL); 779 ap_control_domain_mask_show, NULL);
1506 780
781static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
782{
783 if (!ap_configuration) /* QCI not supported */
784 return snprintf(buf, PAGE_SIZE, "not supported\n");
785
786 return snprintf(buf, PAGE_SIZE,
787 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
788 ap_configuration->aqm[0], ap_configuration->aqm[1],
789 ap_configuration->aqm[2], ap_configuration->aqm[3],
790 ap_configuration->aqm[4], ap_configuration->aqm[5],
791 ap_configuration->aqm[6], ap_configuration->aqm[7]);
792}
793
794static BUS_ATTR(ap_usage_domain_mask, 0444,
795 ap_usage_domain_mask_show, NULL);
796
1507static ssize_t ap_config_time_show(struct bus_type *bus, char *buf) 797static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
1508{ 798{
1509 return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time); 799 return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
@@ -1599,6 +889,7 @@ static BUS_ATTR(ap_max_domain_id, 0444, ap_max_domain_id_show, NULL);
1599static struct bus_attribute *const ap_bus_attrs[] = { 889static struct bus_attribute *const ap_bus_attrs[] = {
1600 &bus_attr_ap_domain, 890 &bus_attr_ap_domain,
1601 &bus_attr_ap_control_domain_mask, 891 &bus_attr_ap_control_domain_mask,
892 &bus_attr_ap_usage_domain_mask,
1602 &bus_attr_config_time, 893 &bus_attr_config_time,
1603 &bus_attr_poll_thread, 894 &bus_attr_poll_thread,
1604 &bus_attr_ap_interrupts, 895 &bus_attr_ap_interrupts,
@@ -1623,9 +914,12 @@ static int ap_select_domain(void)
1623 * the "domain=" parameter or the domain with the maximum number 914 * the "domain=" parameter or the domain with the maximum number
1624 * of devices. 915 * of devices.
1625 */ 916 */
1626 if (ap_domain_index >= 0) 917 spin_lock_bh(&ap_domain_lock);
918 if (ap_domain_index >= 0) {
1627 /* Domain has already been selected. */ 919 /* Domain has already been selected. */
920 spin_unlock_bh(&ap_domain_lock);
1628 return 0; 921 return 0;
922 }
1629 best_domain = -1; 923 best_domain = -1;
1630 max_count = 0; 924 max_count = 0;
1631 for (i = 0; i < AP_DOMAINS; i++) { 925 for (i = 0; i < AP_DOMAINS; i++) {
@@ -1647,109 +941,171 @@ static int ap_select_domain(void)
1647 } 941 }
1648 if (best_domain >= 0){ 942 if (best_domain >= 0){
1649 ap_domain_index = best_domain; 943 ap_domain_index = best_domain;
944 spin_unlock_bh(&ap_domain_lock);
1650 return 0; 945 return 0;
1651 } 946 }
947 spin_unlock_bh(&ap_domain_lock);
1652 return -ENODEV; 948 return -ENODEV;
1653} 949}
1654 950
1655/** 951/*
L1656 * __ap_scan_bus(): Scan the AP bus. 952 * helper function to be used with bus_find_device
1657 * @dev: Pointer to device 953 * matches for the card device with the given id
1658 * @data: Pointer to data
1659 *
1660 * Scan the AP bus for new devices.
1661 */ 954 */
1662static int __ap_scan_bus(struct device *dev, void *data) 955static int __match_card_device_with_id(struct device *dev, void *data)
1663{ 956{
1664 return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data; 957 return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long) data;
1665} 958}
1666 959
 960/* helper function to be used with bus_find_device
961 * matches for the queue device with a given qid
962 */
963static int __match_queue_device_with_qid(struct device *dev, void *data)
964{
965 return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long) data;
966}
967
968/**
969 * ap_scan_bus(): Scan the AP bus for new devices
970 * Runs periodically, workqueue timer (ap_config_time)
971 */
1667static void ap_scan_bus(struct work_struct *unused) 972static void ap_scan_bus(struct work_struct *unused)
1668{ 973{
1669 struct ap_device *ap_dev; 974 struct ap_queue *aq;
975 struct ap_card *ac;
1670 struct device *dev; 976 struct device *dev;
1671 ap_qid_t qid; 977 ap_qid_t qid;
1672 int queue_depth = 0, device_type = 0; 978 int depth = 0, type = 0;
1673 unsigned int device_functions = 0; 979 unsigned int functions = 0;
1674 int rc, i, borked; 980 int rc, id, dom, borked, domains;
981
982 AP_DBF(DBF_DEBUG, "ap_scan_bus running\n");
1675 983
1676 ap_query_configuration(); 984 ap_query_configuration();
1677 if (ap_select_domain() != 0) 985 if (ap_select_domain() != 0)
1678 goto out; 986 goto out;
1679 987
1680 for (i = 0; i < AP_DEVICES; i++) { 988 for (id = 0; id < AP_DEVICES; id++) {
1681 qid = AP_MKQID(i, ap_domain_index); 989 /* check if device is registered */
1682 dev = bus_find_device(&ap_bus_type, NULL, 990 dev = bus_find_device(&ap_bus_type, NULL,
1683 (void *)(unsigned long)qid, 991 (void *)(long) id,
1684 __ap_scan_bus); 992 __match_card_device_with_id);
1685 rc = ap_query_queue(qid, &queue_depth, &device_type, 993 ac = dev ? to_ap_card(dev) : NULL;
1686 &device_functions); 994 if (!ap_test_config_card_id(id)) {
1687 if (dev) { 995 if (dev) {
1688 ap_dev = to_ap_dev(dev); 996 /* Card device has been removed from
1689 spin_lock_bh(&ap_dev->lock); 997 * configuration, remove the belonging
1690 if (rc == -ENODEV) 998 * queue devices.
1691 ap_dev->state = AP_STATE_BORKED; 999 */
1692 borked = ap_dev->state == AP_STATE_BORKED; 1000 bus_for_each_dev(&ap_bus_type, NULL,
1693 spin_unlock_bh(&ap_dev->lock); 1001 (void *)(long) id,
1694 if (borked) /* Remove broken device */ 1002 __ap_queue_devices_with_id_unregister);
1003 /* now remove the card device */
1695 device_unregister(dev); 1004 device_unregister(dev);
1696 put_device(dev); 1005 put_device(dev);
1697 if (!borked) 1006 }
1698 continue;
1699 }
1700 if (rc)
1701 continue;
1702 ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
1703 if (!ap_dev)
1704 break;
1705 ap_dev->qid = qid;
1706 ap_dev->state = AP_STATE_RESET_START;
1707 ap_dev->interrupt = AP_INTR_DISABLED;
1708 ap_dev->queue_depth = queue_depth;
1709 ap_dev->raw_hwtype = device_type;
1710 ap_dev->device_type = device_type;
1711 ap_dev->functions = device_functions;
1712 spin_lock_init(&ap_dev->lock);
1713 INIT_LIST_HEAD(&ap_dev->pendingq);
1714 INIT_LIST_HEAD(&ap_dev->requestq);
1715 INIT_LIST_HEAD(&ap_dev->list);
1716 setup_timer(&ap_dev->timeout, ap_request_timeout,
1717 (unsigned long) ap_dev);
1718
1719 ap_dev->device.bus = &ap_bus_type;
1720 ap_dev->device.parent = ap_root_device;
1721 rc = dev_set_name(&ap_dev->device, "card%02x",
1722 AP_QID_DEVICE(ap_dev->qid));
1723 if (rc) {
1724 kfree(ap_dev);
1725 continue;
1726 }
1727 /* Add to list of devices */
1728 spin_lock_bh(&ap_device_list_lock);
1729 list_add(&ap_dev->list, &ap_device_list);
1730 spin_unlock_bh(&ap_device_list_lock);
1731 /* Start with a device reset */
1732 spin_lock_bh(&ap_dev->lock);
1733 ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_POLL));
1734 spin_unlock_bh(&ap_dev->lock);
1735 /* Register device */
1736 ap_dev->device.release = ap_device_release;
1737 rc = device_register(&ap_dev->device);
1738 if (rc) {
1739 spin_lock_bh(&ap_dev->lock);
1740 list_del_init(&ap_dev->list);
1741 spin_unlock_bh(&ap_dev->lock);
1742 put_device(&ap_dev->device);
1743 continue; 1007 continue;
1744 } 1008 }
1745 /* Add device attributes. */ 1009 /* According to the configuration there should be a card
1746 rc = sysfs_create_group(&ap_dev->device.kobj, 1010 * device, so check if there is at least one valid queue
1747 &ap_dev_attr_group); 1011 * and maybe create queue devices and the card device.
1748 if (rc) { 1012 */
1749 device_unregister(&ap_dev->device); 1013 domains = 0;
1750 continue; 1014 for (dom = 0; dom < AP_DOMAINS; dom++) {
1015 qid = AP_MKQID(id, dom);
1016 dev = bus_find_device(&ap_bus_type, NULL,
1017 (void *)(long) qid,
1018 __match_queue_device_with_qid);
1019 aq = dev ? to_ap_queue(dev) : NULL;
1020 if (!ap_test_config_domain(dom)) {
1021 if (dev) {
1022 /* Queue device exists but has been
1023 * removed from configuration.
1024 */
1025 device_unregister(dev);
1026 put_device(dev);
1027 }
1028 continue;
1029 }
1030 rc = ap_query_queue(qid, &depth, &type, &functions);
1031 if (dev) {
1032 spin_lock_bh(&aq->lock);
1033 if (rc == -ENODEV ||
1034 /* adapter reconfiguration */
1035 (ac && ac->functions != functions))
1036 aq->state = AP_STATE_BORKED;
1037 borked = aq->state == AP_STATE_BORKED;
1038 spin_unlock_bh(&aq->lock);
1039 if (borked) /* Remove broken device */
1040 device_unregister(dev);
1041 put_device(dev);
1042 if (!borked) {
1043 domains++;
1044 continue;
1045 }
1046 }
1047 if (rc)
1048 continue;
1049 /* new queue device needed */
1050 if (!ac) {
1051 /* but first create the card device */
1052 ac = ap_card_create(id, depth,
1053 type, functions);
1054 if (!ac)
1055 continue;
1056 ac->ap_dev.device.bus = &ap_bus_type;
1057 ac->ap_dev.device.parent = ap_root_device;
1058 dev_set_name(&ac->ap_dev.device,
1059 "card%02x", id);
1060 /* Register card with AP bus */
1061 rc = device_register(&ac->ap_dev.device);
1062 if (rc) {
1063 put_device(&ac->ap_dev.device);
1064 ac = NULL;
1065 break;
1066 }
1067 /* get it and thus adjust reference counter */
1068 get_device(&ac->ap_dev.device);
1069 /* Add card device to card list */
1070 spin_lock_bh(&ap_list_lock);
1071 list_add(&ac->list, &ap_card_list);
1072 spin_unlock_bh(&ap_list_lock);
1073 }
1074 /* now create the new queue device */
1075 aq = ap_queue_create(qid, type);
1076 if (!aq)
1077 continue;
1078 aq->card = ac;
1079 aq->ap_dev.device.bus = &ap_bus_type;
1080 aq->ap_dev.device.parent = &ac->ap_dev.device;
1081 dev_set_name(&aq->ap_dev.device,
1082 "%02x.%04x", id, dom);
1083 /* Add queue device to card queue list */
1084 spin_lock_bh(&ap_list_lock);
1085 list_add(&aq->list, &ac->queues);
1086 spin_unlock_bh(&ap_list_lock);
1087 /* Start with a device reset */
1088 spin_lock_bh(&aq->lock);
1089 ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
1090 spin_unlock_bh(&aq->lock);
1091 /* Register device */
1092 rc = device_register(&aq->ap_dev.device);
1093 if (rc) {
1094 spin_lock_bh(&ap_list_lock);
1095 list_del_init(&aq->list);
1096 spin_unlock_bh(&ap_list_lock);
1097 put_device(&aq->ap_dev.device);
1098 continue;
1099 }
1100 domains++;
1101 } /* end domain loop */
1102 if (ac) {
1103 /* remove card dev if there are no queue devices */
1104 if (!domains)
1105 device_unregister(&ac->ap_dev.device);
1106 put_device(&ac->ap_dev.device);
1751 } 1107 }
1752 } 1108 } /* end device loop */
1753out: 1109out:
1754 mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ); 1110 mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
1755} 1111}
@@ -1768,7 +1124,7 @@ static void ap_reset_domain(void)
1768 if (ap_domain_index == -1 || !ap_test_config_domain(ap_domain_index)) 1124 if (ap_domain_index == -1 || !ap_test_config_domain(ap_domain_index))
1769 return; 1125 return;
1770 for (i = 0; i < AP_DEVICES; i++) 1126 for (i = 0; i < AP_DEVICES; i++)
1771 ap_reset_queue(AP_MKQID(i, ap_domain_index)); 1127 ap_rapq(AP_MKQID(i, ap_domain_index));
1772} 1128}
1773 1129
1774static void ap_reset_all(void) 1130static void ap_reset_all(void)
@@ -1781,7 +1137,7 @@ static void ap_reset_all(void)
1781 for (j = 0; j < AP_DEVICES; j++) { 1137 for (j = 0; j < AP_DEVICES; j++) {
1782 if (!ap_test_config_card_id(j)) 1138 if (!ap_test_config_card_id(j))
1783 continue; 1139 continue;
1784 ap_reset_queue(AP_MKQID(j, i)); 1140 ap_rapq(AP_MKQID(j, i));
1785 } 1141 }
1786 } 1142 }
1787} 1143}
@@ -1790,6 +1146,23 @@ static struct reset_call ap_reset_call = {
1790 .fn = ap_reset_all, 1146 .fn = ap_reset_all,
1791}; 1147};
1792 1148
1149int __init ap_debug_init(void)
1150{
1151 ap_dbf_root = debugfs_create_dir("ap", NULL);
1152 ap_dbf_info = debug_register("ap", 1, 1,
1153 DBF_MAX_SPRINTF_ARGS * sizeof(long));
1154 debug_register_view(ap_dbf_info, &debug_sprintf_view);
1155 debug_set_level(ap_dbf_info, DBF_ERR);
1156
1157 return 0;
1158}
1159
1160void ap_debug_exit(void)
1161{
1162 debugfs_remove(ap_dbf_root);
1163 debug_unregister(ap_dbf_info);
1164}
1165
1793/** 1166/**
1794 * ap_module_init(): The module initialization code. 1167 * ap_module_init(): The module initialization code.
1795 * 1168 *
@@ -1800,6 +1173,10 @@ int __init ap_module_init(void)
1800 int max_domain_id; 1173 int max_domain_id;
1801 int rc, i; 1174 int rc, i;
1802 1175
1176 rc = ap_debug_init();
1177 if (rc)
1178 return rc;
1179
1803 if (ap_instructions_available() != 0) { 1180 if (ap_instructions_available() != 0) {
1804 pr_warn("The hardware system does not support AP instructions\n"); 1181 pr_warn("The hardware system does not support AP instructions\n");
1805 return -ENODEV; 1182 return -ENODEV;
@@ -1909,7 +1286,15 @@ void ap_module_exit(void)
1909 del_timer_sync(&ap_config_timer); 1286 del_timer_sync(&ap_config_timer);
1910 hrtimer_cancel(&ap_poll_timer); 1287 hrtimer_cancel(&ap_poll_timer);
1911 tasklet_kill(&ap_tasklet); 1288 tasklet_kill(&ap_tasklet);
1912 bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_devices_unregister); 1289
1290 /* first remove queue devices */
1291 bus_for_each_dev(&ap_bus_type, NULL, NULL,
1292 __ap_queue_devices_unregister);
1293 /* now remove the card devices */
1294 bus_for_each_dev(&ap_bus_type, NULL, NULL,
1295 __ap_card_devices_unregister);
1296
1297 /* remove bus attributes */
1913 for (i = 0; ap_bus_attrs[i]; i++) 1298 for (i = 0; ap_bus_attrs[i]; i++)
1914 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); 1299 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
1915 unregister_pm_notifier(&ap_power_notifier); 1300 unregister_pm_notifier(&ap_power_notifier);
@@ -1919,6 +1304,8 @@ void ap_module_exit(void)
1919 unregister_reset_call(&ap_reset_call); 1304 unregister_reset_call(&ap_reset_call);
1920 if (ap_using_interrupts()) 1305 if (ap_using_interrupts())
1921 unregister_adapter_interrupt(&ap_airq); 1306 unregister_adapter_interrupt(&ap_airq);
1307
1308 ap_debug_exit();
1922} 1309}
1923 1310
1924module_init(ap_module_init); 1311module_init(ap_module_init);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index d7fdf5c024d7..4dc7c88fb054 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -27,7 +27,6 @@
27#define _AP_BUS_H_ 27#define _AP_BUS_H_
28 28
29#include <linux/device.h> 29#include <linux/device.h>
30#include <linux/mod_devicetable.h>
31#include <linux/types.h> 30#include <linux/types.h>
32 31
33#define AP_DEVICES 64 /* Number of AP devices. */ 32#define AP_DEVICES 64 /* Number of AP devices. */
@@ -38,14 +37,17 @@
38 37
39extern int ap_domain_index; 38extern int ap_domain_index;
40 39
40extern spinlock_t ap_list_lock;
41extern struct list_head ap_card_list;
42
41/** 43/**
42 * The ap_qid_t identifier of an ap queue. It contains a 44 * The ap_qid_t identifier of an ap queue. It contains a
43 * 6 bit device index and a 4 bit queue index (domain). 45 * 6 bit card index and a 4 bit queue index (domain).
44 */ 46 */
45typedef unsigned int ap_qid_t; 47typedef unsigned int ap_qid_t;
46 48
47#define AP_MKQID(_device, _queue) (((_device) & 63) << 8 | ((_queue) & 255)) 49#define AP_MKQID(_card, _queue) (((_card) & 63) << 8 | ((_queue) & 255))
48#define AP_QID_DEVICE(_qid) (((_qid) >> 8) & 63) 50#define AP_QID_CARD(_qid) (((_qid) >> 8) & 63)
49#define AP_QID_QUEUE(_qid) ((_qid) & 255) 51#define AP_QID_QUEUE(_qid) ((_qid) & 255)
50 52
51/** 53/**
@@ -55,7 +57,7 @@ typedef unsigned int ap_qid_t;
55 * @queue_full: Is 1 if the queue is full 57 * @queue_full: Is 1 if the queue is full
56 * @pad: A 4 bit pad 58 * @pad: A 4 bit pad
57 * @int_enabled: Shows if interrupts are enabled for the AP 59 * @int_enabled: Shows if interrupts are enabled for the AP
58 * @response_conde: Holds the 8 bit response code 60 * @response_code: Holds the 8 bit response code
59 * @pad2: A 16 bit pad 61 * @pad2: A 16 bit pad
60 * 62 *
61 * The ap queue status word is returned by all three AP functions 63 * The ap queue status word is returned by all three AP functions
@@ -105,6 +107,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
105#define AP_DEVICE_TYPE_CEX3C 9 107#define AP_DEVICE_TYPE_CEX3C 9
106#define AP_DEVICE_TYPE_CEX4 10 108#define AP_DEVICE_TYPE_CEX4 10
107#define AP_DEVICE_TYPE_CEX5 11 109#define AP_DEVICE_TYPE_CEX5 11
110#define AP_DEVICE_TYPE_CEX6 12
108 111
109/* 112/*
110 * Known function facilities 113 * Known function facilities
@@ -166,7 +169,8 @@ struct ap_driver {
166 169
167 int (*probe)(struct ap_device *); 170 int (*probe)(struct ap_device *);
168 void (*remove)(struct ap_device *); 171 void (*remove)(struct ap_device *);
169 int request_timeout; /* request timeout in jiffies */ 172 void (*suspend)(struct ap_device *);
173 void (*resume)(struct ap_device *);
170}; 174};
171 175
172#define to_ap_drv(x) container_of((x), struct ap_driver, driver) 176#define to_ap_drv(x) container_of((x), struct ap_driver, driver)
@@ -174,38 +178,51 @@ struct ap_driver {
174int ap_driver_register(struct ap_driver *, struct module *, char *); 178int ap_driver_register(struct ap_driver *, struct module *, char *);
175void ap_driver_unregister(struct ap_driver *); 179void ap_driver_unregister(struct ap_driver *);
176 180
177typedef enum ap_wait (ap_func_t)(struct ap_device *ap_dev);
178
179struct ap_device { 181struct ap_device {
180 struct device device; 182 struct device device;
181 struct ap_driver *drv; /* Pointer to AP device driver. */ 183 struct ap_driver *drv; /* Pointer to AP device driver. */
182 spinlock_t lock; /* Per device lock. */ 184 int device_type; /* AP device type. */
183 struct list_head list; /* private list of all AP devices. */ 185};
184 186
185 enum ap_state state; /* State of the AP device. */ 187#define to_ap_dev(x) container_of((x), struct ap_device, device)
186 188
187 ap_qid_t qid; /* AP queue id. */ 189struct ap_card {
188 int queue_depth; /* AP queue depth.*/ 190 struct ap_device ap_dev;
189 int device_type; /* AP device type. */ 191 struct list_head list; /* Private list of AP cards. */
192 struct list_head queues; /* List of assoc. AP queues */
193 void *private; /* ap driver private pointer. */
190 int raw_hwtype; /* AP raw hardware type. */ 194 int raw_hwtype; /* AP raw hardware type. */
191 unsigned int functions; /* AP device function bitfield. */ 195 unsigned int functions; /* AP device function bitfield. */
192 struct timer_list timeout; /* Timer for request timeouts. */ 196 int queue_depth; /* AP queue depth.*/
197 int id; /* AP card number. */
198 atomic_t total_request_count; /* # requests ever for this AP device.*/
199};
200
201#define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
193 202
203struct ap_queue {
204 struct ap_device ap_dev;
205 struct list_head list; /* Private list of AP queues. */
206 struct ap_card *card; /* Ptr to assoc. AP card. */
207 spinlock_t lock; /* Per device lock. */
208 void *private; /* ap driver private pointer. */
209 ap_qid_t qid; /* AP queue id. */
194 int interrupt; /* indicate if interrupts are enabled */ 210 int interrupt; /* indicate if interrupts are enabled */
195 int queue_count; /* # messages currently on AP queue. */ 211 int queue_count; /* # messages currently on AP queue. */
196 212 enum ap_state state; /* State of the AP device. */
197 struct list_head pendingq; /* List of message sent to AP queue. */
198 int pendingq_count; /* # requests on pendingq list. */ 213 int pendingq_count; /* # requests on pendingq list. */
199 struct list_head requestq; /* List of message yet to be sent. */
200 int requestq_count; /* # requests on requestq list. */ 214 int requestq_count; /* # requests on requestq list. */
201 int total_request_count; /* # requests ever for this AP device. */ 215 int total_request_count; /* # requests ever for this AP device.*/
202 216 int request_timeout; /* Request timout in jiffies. */
217 struct timer_list timeout; /* Timer for request timeouts. */
218 struct list_head pendingq; /* List of message sent to AP queue. */
219 struct list_head requestq; /* List of message yet to be sent. */
203 struct ap_message *reply; /* Per device reply message. */ 220 struct ap_message *reply; /* Per device reply message. */
204
205 void *private; /* ap driver private pointer. */
206}; 221};
207 222
208#define to_ap_dev(x) container_of((x), struct ap_device, device) 223#define to_ap_queue(x) container_of((x), struct ap_queue, ap_dev.device)
224
225typedef enum ap_wait (ap_func_t)(struct ap_queue *queue);
209 226
210struct ap_message { 227struct ap_message {
211 struct list_head list; /* Request queueing. */ 228 struct list_head list; /* Request queueing. */
@@ -217,7 +234,7 @@ struct ap_message {
217 void *private; /* ap driver private pointer. */ 234 void *private; /* ap driver private pointer. */
218 unsigned int special:1; /* Used for special commands. */ 235 unsigned int special:1; /* Used for special commands. */
219 /* receive is called from tasklet context */ 236 /* receive is called from tasklet context */
220 void (*receive)(struct ap_device *, struct ap_message *, 237 void (*receive)(struct ap_queue *, struct ap_message *,
221 struct ap_message *); 238 struct ap_message *);
222}; 239};
223 240
@@ -232,10 +249,6 @@ struct ap_config_info {
232 unsigned char reserved4[16]; 249 unsigned char reserved4[16];
233} __packed; 250} __packed;
234 251
235#define AP_DEVICE(dt) \
236 .dev_type=(dt), \
237 .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
238
239/** 252/**
240 * ap_init_message() - Initialize ap_message. 253 * ap_init_message() - Initialize ap_message.
241 * Initialize a message before using. Otherwise this might result in 254 * Initialize a message before using. Otherwise this might result in
@@ -250,6 +263,12 @@ static inline void ap_init_message(struct ap_message *ap_msg)
250 ap_msg->receive = NULL; 263 ap_msg->receive = NULL;
251} 264}
252 265
266#define for_each_ap_card(_ac) \
267 list_for_each_entry(_ac, &ap_card_list, list)
268
269#define for_each_ap_queue(_aq, _ac) \
270 list_for_each_entry(_aq, &(_ac)->queues, list)
271
253/* 272/*
254 * Note: don't use ap_send/ap_recv after using ap_queue_message 273 * Note: don't use ap_send/ap_recv after using ap_queue_message
255 * for the first time. Otherwise the ap message queue will get 274 * for the first time. Otherwise the ap message queue will get
@@ -258,11 +277,26 @@ static inline void ap_init_message(struct ap_message *ap_msg)
258int ap_send(ap_qid_t, unsigned long long, void *, size_t); 277int ap_send(ap_qid_t, unsigned long long, void *, size_t);
259int ap_recv(ap_qid_t, unsigned long long *, void *, size_t); 278int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
260 279
261void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg); 280enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event);
262void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg); 281enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event);
263void ap_flush_queue(struct ap_device *ap_dev); 282
283void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg);
284void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg);
285void ap_flush_queue(struct ap_queue *aq);
286
287void *ap_airq_ptr(void);
288void ap_wait(enum ap_wait wait);
289void ap_request_timeout(unsigned long data);
264void ap_bus_force_rescan(void); 290void ap_bus_force_rescan(void);
265void ap_device_init_reply(struct ap_device *ap_dev, struct ap_message *ap_msg); 291
292void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
293struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
294void ap_queue_remove(struct ap_queue *aq);
295void ap_queue_suspend(struct ap_device *ap_dev);
296void ap_queue_resume(struct ap_device *ap_dev);
297
298struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
299 unsigned int device_functions);
266 300
267int ap_module_init(void); 301int ap_module_init(void);
268void ap_module_exit(void); 302void ap_module_exit(void);
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
new file mode 100644
index 000000000000..0110d44172a3
--- /dev/null
+++ b/drivers/s390/crypto/ap_card.c
@@ -0,0 +1,170 @@
1/*
2 * Copyright IBM Corp. 2016
3 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
4 *
5 * Adjunct processor bus, card related code.
6 */
7
8#define KMSG_COMPONENT "ap"
9#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
10
11#include <linux/init.h>
12#include <linux/slab.h>
13#include <asm/facility.h>
14
15#include "ap_bus.h"
16#include "ap_asm.h"
17
18/*
19 * AP card related attributes.
20 */
21static ssize_t ap_hwtype_show(struct device *dev,
22 struct device_attribute *attr, char *buf)
23{
24 struct ap_card *ac = to_ap_card(dev);
25
26 return snprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type);
27}
28
29static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
30
31static ssize_t ap_raw_hwtype_show(struct device *dev,
32 struct device_attribute *attr, char *buf)
33{
34 struct ap_card *ac = to_ap_card(dev);
35
36 return snprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype);
37}
38
39static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL);
40
41static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
42 char *buf)
43{
44 struct ap_card *ac = to_ap_card(dev);
45
46 return snprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth);
47}
48
49static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
50
51static ssize_t ap_functions_show(struct device *dev,
52 struct device_attribute *attr, char *buf)
53{
54 struct ap_card *ac = to_ap_card(dev);
55
56 return snprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions);
57}
58
59static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
60
61static ssize_t ap_request_count_show(struct device *dev,
62 struct device_attribute *attr,
63 char *buf)
64{
65 struct ap_card *ac = to_ap_card(dev);
66 unsigned int req_cnt;
67
68 req_cnt = 0;
69 spin_lock_bh(&ap_list_lock);
70 req_cnt = atomic_read(&ac->total_request_count);
71 spin_unlock_bh(&ap_list_lock);
72 return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
73}
74
75static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
76
77static ssize_t ap_requestq_count_show(struct device *dev,
78 struct device_attribute *attr, char *buf)
79{
80 struct ap_card *ac = to_ap_card(dev);
81 struct ap_queue *aq;
82 unsigned int reqq_cnt;
83
84 reqq_cnt = 0;
85 spin_lock_bh(&ap_list_lock);
86 for_each_ap_queue(aq, ac)
87 reqq_cnt += aq->requestq_count;
88 spin_unlock_bh(&ap_list_lock);
89 return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
90}
91
92static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
93
94static ssize_t ap_pendingq_count_show(struct device *dev,
95 struct device_attribute *attr, char *buf)
96{
97 struct ap_card *ac = to_ap_card(dev);
98 struct ap_queue *aq;
99 unsigned int penq_cnt;
100
101 penq_cnt = 0;
102 spin_lock_bh(&ap_list_lock);
103 for_each_ap_queue(aq, ac)
104 penq_cnt += aq->pendingq_count;
105 spin_unlock_bh(&ap_list_lock);
106 return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
107}
108
109static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
110
111static ssize_t ap_modalias_show(struct device *dev,
112 struct device_attribute *attr, char *buf)
113{
114 return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
115}
116
117static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
118
119static struct attribute *ap_card_dev_attrs[] = {
120 &dev_attr_hwtype.attr,
121 &dev_attr_raw_hwtype.attr,
122 &dev_attr_depth.attr,
123 &dev_attr_ap_functions.attr,
124 &dev_attr_request_count.attr,
125 &dev_attr_requestq_count.attr,
126 &dev_attr_pendingq_count.attr,
127 &dev_attr_modalias.attr,
128 NULL
129};
130
131static struct attribute_group ap_card_dev_attr_group = {
132 .attrs = ap_card_dev_attrs
133};
134
135static const struct attribute_group *ap_card_dev_attr_groups[] = {
136 &ap_card_dev_attr_group,
137 NULL
138};
139
140struct device_type ap_card_type = {
141 .name = "ap_card",
142 .groups = ap_card_dev_attr_groups,
143};
144
/* Release callback: frees the ap_card when its device refcount drops to 0. */
static void ap_card_device_release(struct device *dev)
{
	struct ap_card *ac = to_ap_card(dev);

	kfree(ac);
}
149
150struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
151 unsigned int functions)
152{
153 struct ap_card *ac;
154
155 ac = kzalloc(sizeof(*ac), GFP_KERNEL);
156 if (!ac)
157 return NULL;
158 INIT_LIST_HEAD(&ac->queues);
159 ac->ap_dev.device.release = ap_card_device_release;
160 ac->ap_dev.device.type = &ap_card_type;
161 ac->ap_dev.device_type = device_type;
162 /* CEX6 toleration: map to CEX5 */
163 if (device_type == AP_DEVICE_TYPE_CEX6)
164 ac->ap_dev.device_type = AP_DEVICE_TYPE_CEX5;
165 ac->raw_hwtype = device_type;
166 ac->queue_depth = queue_depth;
167 ac->functions = functions;
168 ac->id = id;
169 return ac;
170}
diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h
new file mode 100644
index 000000000000..78dbff842dae
--- /dev/null
+++ b/drivers/s390/crypto/ap_debug.h
@@ -0,0 +1,28 @@
1/*
2 * Copyright IBM Corp. 2016
3 * Author(s): Harald Freudenberger <freude@de.ibm.com>
4 */
5#ifndef AP_DEBUG_H
6#define AP_DEBUG_H
7
8#include <asm/debug.h>
9
10#define DBF_ERR 3 /* error conditions */
11#define DBF_WARN 4 /* warning conditions */
12#define DBF_INFO 5 /* informational */
13#define DBF_DEBUG 6 /* for debugging only */
14
15#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
16#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
17
18#define DBF_MAX_SPRINTF_ARGS 5
19
20#define AP_DBF(...) \
21 debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__)
22
23extern debug_info_t *ap_dbf_info;
24
25int ap_debug_init(void);
26void ap_debug_exit(void);
27
28#endif /* AP_DEBUG_H */
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
new file mode 100644
index 000000000000..b58a917dc510
--- /dev/null
+++ b/drivers/s390/crypto/ap_queue.c
@@ -0,0 +1,701 @@
1/*
2 * Copyright IBM Corp. 2016
3 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
4 *
5 * Adjunct processor bus, queue related code.
6 */
7
8#define KMSG_COMPONENT "ap"
9#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
10
11#include <linux/init.h>
12#include <linux/slab.h>
13#include <asm/facility.h>
14
15#include "ap_bus.h"
16#include "ap_asm.h"
17
18/**
19 * ap_queue_enable_interruption(): Enable interruption on an AP queue.
20 * @qid: The AP queue number
21 * @ind: the notification indicator byte
22 *
23 * Enables interruption on AP queue via ap_aqic(). Based on the return
24 * value it waits a while and tests the AP queue if interrupts
25 * have been switched on using ap_test_queue().
26 */
27static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
28{
29 struct ap_queue_status status;
30
31 status = ap_aqic(aq->qid, ind);
32 switch (status.response_code) {
33 case AP_RESPONSE_NORMAL:
34 case AP_RESPONSE_OTHERWISE_CHANGED:
35 return 0;
36 case AP_RESPONSE_Q_NOT_AVAIL:
37 case AP_RESPONSE_DECONFIGURED:
38 case AP_RESPONSE_CHECKSTOPPED:
39 case AP_RESPONSE_INVALID_ADDRESS:
40 pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
41 AP_QID_CARD(aq->qid),
42 AP_QID_QUEUE(aq->qid));
43 return -EOPNOTSUPP;
44 case AP_RESPONSE_RESET_IN_PROGRESS:
45 case AP_RESPONSE_BUSY:
46 default:
47 return -EBUSY;
48 }
49}
50
51/**
52 * __ap_send(): Send message to adjunct processor queue.
53 * @qid: The AP queue number
54 * @psmid: The program supplied message identifier
55 * @msg: The message text
56 * @length: The message length
57 * @special: Special Bit
58 *
59 * Returns AP queue status structure.
60 * Condition code 1 on NQAP can't happen because the L bit is 1.
61 * Condition code 2 on NQAP also means the send is incomplete,
62 * because a segment boundary was reached. The NQAP is repeated.
63 */
64static inline struct ap_queue_status
65__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
66 unsigned int special)
67{
68 if (special == 1)
69 qid |= 0x400000UL;
70 return ap_nqap(qid, psmid, msg, length);
71}
72
73int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
74{
75 struct ap_queue_status status;
76
77 status = __ap_send(qid, psmid, msg, length, 0);
78 switch (status.response_code) {
79 case AP_RESPONSE_NORMAL:
80 return 0;
81 case AP_RESPONSE_Q_FULL:
82 case AP_RESPONSE_RESET_IN_PROGRESS:
83 return -EBUSY;
84 case AP_RESPONSE_REQ_FAC_NOT_INST:
85 return -EINVAL;
86 default: /* Device is gone. */
87 return -ENODEV;
88 }
89}
90EXPORT_SYMBOL(ap_send);
91
92int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
93{
94 struct ap_queue_status status;
95
96 if (msg == NULL)
97 return -EINVAL;
98 status = ap_dqap(qid, psmid, msg, length);
99 switch (status.response_code) {
100 case AP_RESPONSE_NORMAL:
101 return 0;
102 case AP_RESPONSE_NO_PENDING_REPLY:
103 if (status.queue_empty)
104 return -ENOENT;
105 return -EBUSY;
106 case AP_RESPONSE_RESET_IN_PROGRESS:
107 return -EBUSY;
108 default:
109 return -ENODEV;
110 }
111}
112EXPORT_SYMBOL(ap_recv);
113
114/* State machine definitions and helpers */
115
116static enum ap_wait ap_sm_nop(struct ap_queue *aq)
117{
118 return AP_WAIT_NONE;
119}
120
121/**
122 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
123 * not change the state of the device.
124 * @aq: pointer to the AP queue
125 *
126 * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
127 */
128static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
129{
130 struct ap_queue_status status;
131 struct ap_message *ap_msg;
132
133 status = ap_dqap(aq->qid, &aq->reply->psmid,
134 aq->reply->message, aq->reply->length);
135 switch (status.response_code) {
136 case AP_RESPONSE_NORMAL:
137 aq->queue_count--;
138 if (aq->queue_count > 0)
139 mod_timer(&aq->timeout,
140 jiffies + aq->request_timeout);
141 list_for_each_entry(ap_msg, &aq->pendingq, list) {
142 if (ap_msg->psmid != aq->reply->psmid)
143 continue;
144 list_del_init(&ap_msg->list);
145 aq->pendingq_count--;
146 ap_msg->receive(aq, ap_msg, aq->reply);
147 break;
148 }
149 case AP_RESPONSE_NO_PENDING_REPLY:
150 if (!status.queue_empty || aq->queue_count <= 0)
151 break;
152 /* The card shouldn't forget requests but who knows. */
153 aq->queue_count = 0;
154 list_splice_init(&aq->pendingq, &aq->requestq);
155 aq->requestq_count += aq->pendingq_count;
156 aq->pendingq_count = 0;
157 break;
158 default:
159 break;
160 }
161 return status;
162}
163
164/**
165 * ap_sm_read(): Receive pending reply messages from an AP queue.
166 * @aq: pointer to the AP queue
167 *
168 * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
169 */
170static enum ap_wait ap_sm_read(struct ap_queue *aq)
171{
172 struct ap_queue_status status;
173
174 if (!aq->reply)
175 return AP_WAIT_NONE;
176 status = ap_sm_recv(aq);
177 switch (status.response_code) {
178 case AP_RESPONSE_NORMAL:
179 if (aq->queue_count > 0) {
180 aq->state = AP_STATE_WORKING;
181 return AP_WAIT_AGAIN;
182 }
183 aq->state = AP_STATE_IDLE;
184 return AP_WAIT_NONE;
185 case AP_RESPONSE_NO_PENDING_REPLY:
186 if (aq->queue_count > 0)
187 return AP_WAIT_INTERRUPT;
188 aq->state = AP_STATE_IDLE;
189 return AP_WAIT_NONE;
190 default:
191 aq->state = AP_STATE_BORKED;
192 return AP_WAIT_NONE;
193 }
194}
195
196/**
197 * ap_sm_suspend_read(): Receive pending reply messages from an AP queue
198 * without changing the device state in between. In suspend mode we don't
199 * allow sending new requests, therefore just fetch pending replies.
200 * @aq: pointer to the AP queue
201 *
202 * Returns AP_WAIT_NONE or AP_WAIT_AGAIN
203 */
204static enum ap_wait ap_sm_suspend_read(struct ap_queue *aq)
205{
206 struct ap_queue_status status;
207
208 if (!aq->reply)
209 return AP_WAIT_NONE;
210 status = ap_sm_recv(aq);
211 switch (status.response_code) {
212 case AP_RESPONSE_NORMAL:
213 if (aq->queue_count > 0)
214 return AP_WAIT_AGAIN;
215 /* fall through */
216 default:
217 return AP_WAIT_NONE;
218 }
219}
220
221/**
222 * ap_sm_write(): Send messages from the request queue to an AP queue.
223 * @aq: pointer to the AP queue
224 *
225 * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
226 */
227static enum ap_wait ap_sm_write(struct ap_queue *aq)
228{
229 struct ap_queue_status status;
230 struct ap_message *ap_msg;
231
232 if (aq->requestq_count <= 0)
233 return AP_WAIT_NONE;
234 /* Start the next request on the queue. */
235 ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
236 status = __ap_send(aq->qid, ap_msg->psmid,
237 ap_msg->message, ap_msg->length, ap_msg->special);
238 switch (status.response_code) {
239 case AP_RESPONSE_NORMAL:
240 aq->queue_count++;
241 if (aq->queue_count == 1)
242 mod_timer(&aq->timeout, jiffies + aq->request_timeout);
243 list_move_tail(&ap_msg->list, &aq->pendingq);
244 aq->requestq_count--;
245 aq->pendingq_count++;
246 if (aq->queue_count < aq->card->queue_depth) {
247 aq->state = AP_STATE_WORKING;
248 return AP_WAIT_AGAIN;
249 }
250 /* fall through */
251 case AP_RESPONSE_Q_FULL:
252 aq->state = AP_STATE_QUEUE_FULL;
253 return AP_WAIT_INTERRUPT;
254 case AP_RESPONSE_RESET_IN_PROGRESS:
255 aq->state = AP_STATE_RESET_WAIT;
256 return AP_WAIT_TIMEOUT;
257 case AP_RESPONSE_MESSAGE_TOO_BIG:
258 case AP_RESPONSE_REQ_FAC_NOT_INST:
259 list_del_init(&ap_msg->list);
260 aq->requestq_count--;
261 ap_msg->rc = -EINVAL;
262 ap_msg->receive(aq, ap_msg, NULL);
263 return AP_WAIT_AGAIN;
264 default:
265 aq->state = AP_STATE_BORKED;
266 return AP_WAIT_NONE;
267 }
268}
269
270/**
271 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
272 * @aq: pointer to the AP queue
273 *
274 * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
275 */
276static enum ap_wait ap_sm_read_write(struct ap_queue *aq)
277{
278 return min(ap_sm_read(aq), ap_sm_write(aq));
279}
280
281/**
282 * ap_sm_reset(): Reset an AP queue.
283 * @qid: The AP queue number
284 *
285 * Submit the Reset command to an AP queue.
286 */
287static enum ap_wait ap_sm_reset(struct ap_queue *aq)
288{
289 struct ap_queue_status status;
290
291 status = ap_rapq(aq->qid);
292 switch (status.response_code) {
293 case AP_RESPONSE_NORMAL:
294 case AP_RESPONSE_RESET_IN_PROGRESS:
295 aq->state = AP_STATE_RESET_WAIT;
296 aq->interrupt = AP_INTR_DISABLED;
297 return AP_WAIT_TIMEOUT;
298 case AP_RESPONSE_BUSY:
299 return AP_WAIT_TIMEOUT;
300 case AP_RESPONSE_Q_NOT_AVAIL:
301 case AP_RESPONSE_DECONFIGURED:
302 case AP_RESPONSE_CHECKSTOPPED:
303 default:
304 aq->state = AP_STATE_BORKED;
305 return AP_WAIT_NONE;
306 }
307}
308
309/**
310 * ap_sm_reset_wait(): Test queue for completion of the reset operation
311 * @aq: pointer to the AP queue
312 *
313 * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
314 */
315static enum ap_wait ap_sm_reset_wait(struct ap_queue *aq)
316{
317 struct ap_queue_status status;
318 void *lsi_ptr;
319
320 if (aq->queue_count > 0 && aq->reply)
321 /* Try to read a completed message and get the status */
322 status = ap_sm_recv(aq);
323 else
324 /* Get the status with TAPQ */
325 status = ap_tapq(aq->qid, NULL);
326
327 switch (status.response_code) {
328 case AP_RESPONSE_NORMAL:
329 lsi_ptr = ap_airq_ptr();
330 if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0)
331 aq->state = AP_STATE_SETIRQ_WAIT;
332 else
333 aq->state = (aq->queue_count > 0) ?
334 AP_STATE_WORKING : AP_STATE_IDLE;
335 return AP_WAIT_AGAIN;
336 case AP_RESPONSE_BUSY:
337 case AP_RESPONSE_RESET_IN_PROGRESS:
338 return AP_WAIT_TIMEOUT;
339 case AP_RESPONSE_Q_NOT_AVAIL:
340 case AP_RESPONSE_DECONFIGURED:
341 case AP_RESPONSE_CHECKSTOPPED:
342 default:
343 aq->state = AP_STATE_BORKED;
344 return AP_WAIT_NONE;
345 }
346}
347
348/**
349 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
350 * @aq: pointer to the AP queue
351 *
352 * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
353 */
354static enum ap_wait ap_sm_setirq_wait(struct ap_queue *aq)
355{
356 struct ap_queue_status status;
357
358 if (aq->queue_count > 0 && aq->reply)
359 /* Try to read a completed message and get the status */
360 status = ap_sm_recv(aq);
361 else
362 /* Get the status with TAPQ */
363 status = ap_tapq(aq->qid, NULL);
364
365 if (status.int_enabled == 1) {
366 /* Irqs are now enabled */
367 aq->interrupt = AP_INTR_ENABLED;
368 aq->state = (aq->queue_count > 0) ?
369 AP_STATE_WORKING : AP_STATE_IDLE;
370 }
371
372 switch (status.response_code) {
373 case AP_RESPONSE_NORMAL:
374 if (aq->queue_count > 0)
375 return AP_WAIT_AGAIN;
376 /* fallthrough */
377 case AP_RESPONSE_NO_PENDING_REPLY:
378 return AP_WAIT_TIMEOUT;
379 default:
380 aq->state = AP_STATE_BORKED;
381 return AP_WAIT_NONE;
382 }
383}
384
385/*
386 * AP state machine jump table
387 */
388static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
389 [AP_STATE_RESET_START] = {
390 [AP_EVENT_POLL] = ap_sm_reset,
391 [AP_EVENT_TIMEOUT] = ap_sm_nop,
392 },
393 [AP_STATE_RESET_WAIT] = {
394 [AP_EVENT_POLL] = ap_sm_reset_wait,
395 [AP_EVENT_TIMEOUT] = ap_sm_nop,
396 },
397 [AP_STATE_SETIRQ_WAIT] = {
398 [AP_EVENT_POLL] = ap_sm_setirq_wait,
399 [AP_EVENT_TIMEOUT] = ap_sm_nop,
400 },
401 [AP_STATE_IDLE] = {
402 [AP_EVENT_POLL] = ap_sm_write,
403 [AP_EVENT_TIMEOUT] = ap_sm_nop,
404 },
405 [AP_STATE_WORKING] = {
406 [AP_EVENT_POLL] = ap_sm_read_write,
407 [AP_EVENT_TIMEOUT] = ap_sm_reset,
408 },
409 [AP_STATE_QUEUE_FULL] = {
410 [AP_EVENT_POLL] = ap_sm_read,
411 [AP_EVENT_TIMEOUT] = ap_sm_reset,
412 },
413 [AP_STATE_SUSPEND_WAIT] = {
414 [AP_EVENT_POLL] = ap_sm_suspend_read,
415 [AP_EVENT_TIMEOUT] = ap_sm_nop,
416 },
417 [AP_STATE_BORKED] = {
418 [AP_EVENT_POLL] = ap_sm_nop,
419 [AP_EVENT_TIMEOUT] = ap_sm_nop,
420 },
421};
422
423enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event)
424{
425 return ap_jumptable[aq->state][event](aq);
426}
427
428enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event)
429{
430 enum ap_wait wait;
431
432 while ((wait = ap_sm_event(aq, event)) == AP_WAIT_AGAIN)
433 ;
434 return wait;
435}
436
437/*
438 * Power management for queue devices
439 */
440void ap_queue_suspend(struct ap_device *ap_dev)
441{
442 struct ap_queue *aq = to_ap_queue(&ap_dev->device);
443
444 /* Poll on the device until all requests are finished. */
445 spin_lock_bh(&aq->lock);
446 aq->state = AP_STATE_SUSPEND_WAIT;
447 while (ap_sm_event(aq, AP_EVENT_POLL) != AP_WAIT_NONE)
448 ;
449 aq->state = AP_STATE_BORKED;
450 spin_unlock_bh(&aq->lock);
451}
452EXPORT_SYMBOL(ap_queue_suspend);
453
/* Resume hook: intentionally empty, the bus rescan rebuilds queues. */
void ap_queue_resume(struct ap_device *ap_dev)
{
}
EXPORT_SYMBOL(ap_queue_resume);
458
459/*
460 * AP queue related attributes.
461 */
462static ssize_t ap_request_count_show(struct device *dev,
463 struct device_attribute *attr,
464 char *buf)
465{
466 struct ap_queue *aq = to_ap_queue(dev);
467 unsigned int req_cnt;
468
469 spin_lock_bh(&aq->lock);
470 req_cnt = aq->total_request_count;
471 spin_unlock_bh(&aq->lock);
472 return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
473}
474
475static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
476
477static ssize_t ap_requestq_count_show(struct device *dev,
478 struct device_attribute *attr, char *buf)
479{
480 struct ap_queue *aq = to_ap_queue(dev);
481 unsigned int reqq_cnt = 0;
482
483 spin_lock_bh(&aq->lock);
484 reqq_cnt = aq->requestq_count;
485 spin_unlock_bh(&aq->lock);
486 return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
487}
488
489static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
490
491static ssize_t ap_pendingq_count_show(struct device *dev,
492 struct device_attribute *attr, char *buf)
493{
494 struct ap_queue *aq = to_ap_queue(dev);
495 unsigned int penq_cnt = 0;
496
497 spin_lock_bh(&aq->lock);
498 penq_cnt = aq->pendingq_count;
499 spin_unlock_bh(&aq->lock);
500 return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
501}
502
503static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
504
505static ssize_t ap_reset_show(struct device *dev,
506 struct device_attribute *attr, char *buf)
507{
508 struct ap_queue *aq = to_ap_queue(dev);
509 int rc = 0;
510
511 spin_lock_bh(&aq->lock);
512 switch (aq->state) {
513 case AP_STATE_RESET_START:
514 case AP_STATE_RESET_WAIT:
515 rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n");
516 break;
517 case AP_STATE_WORKING:
518 case AP_STATE_QUEUE_FULL:
519 rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
520 break;
521 default:
522 rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
523 }
524 spin_unlock_bh(&aq->lock);
525 return rc;
526}
527
528static DEVICE_ATTR(reset, 0444, ap_reset_show, NULL);
529
530static ssize_t ap_interrupt_show(struct device *dev,
531 struct device_attribute *attr, char *buf)
532{
533 struct ap_queue *aq = to_ap_queue(dev);
534 int rc = 0;
535
536 spin_lock_bh(&aq->lock);
537 if (aq->state == AP_STATE_SETIRQ_WAIT)
538 rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
539 else if (aq->interrupt == AP_INTR_ENABLED)
540 rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
541 else
542 rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
543 spin_unlock_bh(&aq->lock);
544 return rc;
545}
546
547static DEVICE_ATTR(interrupt, 0444, ap_interrupt_show, NULL);
548
549static struct attribute *ap_queue_dev_attrs[] = {
550 &dev_attr_request_count.attr,
551 &dev_attr_requestq_count.attr,
552 &dev_attr_pendingq_count.attr,
553 &dev_attr_reset.attr,
554 &dev_attr_interrupt.attr,
555 NULL
556};
557
558static struct attribute_group ap_queue_dev_attr_group = {
559 .attrs = ap_queue_dev_attrs
560};
561
562static const struct attribute_group *ap_queue_dev_attr_groups[] = {
563 &ap_queue_dev_attr_group,
564 NULL
565};
566
567struct device_type ap_queue_type = {
568 .name = "ap_queue",
569 .groups = ap_queue_dev_attr_groups,
570};
571
/* Release callback: frees the ap_queue when its device refcount drops to 0. */
static void ap_queue_device_release(struct device *dev)
{
	struct ap_queue *aq = to_ap_queue(dev);

	kfree(aq);
}
576
577struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
578{
579 struct ap_queue *aq;
580
581 aq = kzalloc(sizeof(*aq), GFP_KERNEL);
582 if (!aq)
583 return NULL;
584 aq->ap_dev.device.release = ap_queue_device_release;
585 aq->ap_dev.device.type = &ap_queue_type;
586 aq->ap_dev.device_type = device_type;
587 /* CEX6 toleration: map to CEX5 */
588 if (device_type == AP_DEVICE_TYPE_CEX6)
589 aq->ap_dev.device_type = AP_DEVICE_TYPE_CEX5;
590 aq->qid = qid;
591 aq->state = AP_STATE_RESET_START;
592 aq->interrupt = AP_INTR_DISABLED;
593 spin_lock_init(&aq->lock);
594 INIT_LIST_HEAD(&aq->pendingq);
595 INIT_LIST_HEAD(&aq->requestq);
596 setup_timer(&aq->timeout, ap_request_timeout, (unsigned long) aq);
597
598 return aq;
599}
600
601void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
602{
603 aq->reply = reply;
604
605 spin_lock_bh(&aq->lock);
606 ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
607 spin_unlock_bh(&aq->lock);
608}
609EXPORT_SYMBOL(ap_queue_init_reply);
610
611/**
612 * ap_queue_message(): Queue a request to an AP device.
613 * @aq: The AP device to queue the message to
614 * @ap_msg: The message that is to be added
615 */
616void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
617{
618 /* For asynchronous message handling a valid receive-callback
619 * is required.
620 */
621 BUG_ON(!ap_msg->receive);
622
623 spin_lock_bh(&aq->lock);
624 /* Queue the message. */
625 list_add_tail(&ap_msg->list, &aq->requestq);
626 aq->requestq_count++;
627 aq->total_request_count++;
628 atomic_inc(&aq->card->total_request_count);
629 /* Send/receive as many request from the queue as possible. */
630 ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
631 spin_unlock_bh(&aq->lock);
632}
633EXPORT_SYMBOL(ap_queue_message);
634
635/**
636 * ap_cancel_message(): Cancel a crypto request.
637 * @aq: The AP device that has the message queued
638 * @ap_msg: The message that is to be removed
639 *
640 * Cancel a crypto request. This is done by removing the request
641 * from the device pending or request queue. Note that the
642 * request stays on the AP queue. When it finishes the message
643 * reply will be discarded because the psmid can't be found.
644 */
645void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
646{
647 struct ap_message *tmp;
648
649 spin_lock_bh(&aq->lock);
650 if (!list_empty(&ap_msg->list)) {
651 list_for_each_entry(tmp, &aq->pendingq, list)
652 if (tmp->psmid == ap_msg->psmid) {
653 aq->pendingq_count--;
654 goto found;
655 }
656 aq->requestq_count--;
657found:
658 list_del_init(&ap_msg->list);
659 }
660 spin_unlock_bh(&aq->lock);
661}
662EXPORT_SYMBOL(ap_cancel_message);
663
664/**
665 * __ap_flush_queue(): Flush requests.
666 * @aq: Pointer to the AP queue
667 *
668 * Flush all requests from the request/pending queue of an AP device.
669 */
670static void __ap_flush_queue(struct ap_queue *aq)
671{
672 struct ap_message *ap_msg, *next;
673
674 list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
675 list_del_init(&ap_msg->list);
676 aq->pendingq_count--;
677 ap_msg->rc = -EAGAIN;
678 ap_msg->receive(aq, ap_msg, NULL);
679 }
680 list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
681 list_del_init(&ap_msg->list);
682 aq->requestq_count--;
683 ap_msg->rc = -EAGAIN;
684 ap_msg->receive(aq, ap_msg, NULL);
685 }
686}
687
688void ap_flush_queue(struct ap_queue *aq)
689{
690 spin_lock_bh(&aq->lock);
691 __ap_flush_queue(aq);
692 spin_unlock_bh(&aq->lock);
693}
694EXPORT_SYMBOL(ap_flush_queue);
695
696void ap_queue_remove(struct ap_queue *aq)
697{
698 ap_flush_queue(aq);
699 del_timer_sync(&aq->timeout);
700}
701EXPORT_SYMBOL(ap_queue_remove);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 5d3d04c040c2..854a6e58dfea 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -41,10 +41,14 @@
41#include <linux/debugfs.h> 41#include <linux/debugfs.h>
42#include <asm/debug.h> 42#include <asm/debug.h>
43 43
44#include "zcrypt_debug.h" 44#define CREATE_TRACE_POINTS
45#include <asm/trace/zcrypt.h>
46
45#include "zcrypt_api.h" 47#include "zcrypt_api.h"
48#include "zcrypt_debug.h"
46 49
47#include "zcrypt_msgtype6.h" 50#include "zcrypt_msgtype6.h"
51#include "zcrypt_msgtype50.h"
48 52
49/* 53/*
50 * Module description. 54 * Module description.
@@ -54,76 +58,31 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
54 "Copyright IBM Corp. 2001, 2012"); 58 "Copyright IBM Corp. 2001, 2012");
55MODULE_LICENSE("GPL"); 59MODULE_LICENSE("GPL");
56 60
61/*
62 * zcrypt tracepoint functions
63 */
64EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
65EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);
66
57static int zcrypt_hwrng_seed = 1; 67static int zcrypt_hwrng_seed = 1;
58module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP); 68module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP);
59MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on)."); 69MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");
60 70
61static DEFINE_SPINLOCK(zcrypt_device_lock); 71DEFINE_SPINLOCK(zcrypt_list_lock);
62static LIST_HEAD(zcrypt_device_list); 72LIST_HEAD(zcrypt_card_list);
63static int zcrypt_device_count = 0; 73int zcrypt_device_count;
74
64static atomic_t zcrypt_open_count = ATOMIC_INIT(0); 75static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
65static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0); 76static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);
66 77
67atomic_t zcrypt_rescan_req = ATOMIC_INIT(0); 78atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
68EXPORT_SYMBOL(zcrypt_rescan_req); 79EXPORT_SYMBOL(zcrypt_rescan_req);
69 80
70static int zcrypt_rng_device_add(void);
71static void zcrypt_rng_device_remove(void);
72
73static DEFINE_SPINLOCK(zcrypt_ops_list_lock);
74static LIST_HEAD(zcrypt_ops_list); 81static LIST_HEAD(zcrypt_ops_list);
75 82
76static debug_info_t *zcrypt_dbf_common; 83/* Zcrypt related debug feature stuff. */
77static debug_info_t *zcrypt_dbf_devices; 84static struct dentry *zcrypt_dbf_root;
78static struct dentry *debugfs_root; 85debug_info_t *zcrypt_dbf_info;
79
80/*
81 * Device attributes common for all crypto devices.
82 */
83static ssize_t zcrypt_type_show(struct device *dev,
84 struct device_attribute *attr, char *buf)
85{
86 struct zcrypt_device *zdev = to_ap_dev(dev)->private;
87 return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string);
88}
89
90static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL);
91
92static ssize_t zcrypt_online_show(struct device *dev,
93 struct device_attribute *attr, char *buf)
94{
95 struct zcrypt_device *zdev = to_ap_dev(dev)->private;
96 return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online);
97}
98
99static ssize_t zcrypt_online_store(struct device *dev,
100 struct device_attribute *attr,
101 const char *buf, size_t count)
102{
103 struct zcrypt_device *zdev = to_ap_dev(dev)->private;
104 int online;
105
106 if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
107 return -EINVAL;
108 zdev->online = online;
109 ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dman", zdev->ap_dev->qid,
110 zdev->online);
111 if (!online)
112 ap_flush_queue(zdev->ap_dev);
113 return count;
114}
115
116static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store);
117
118static struct attribute * zcrypt_device_attrs[] = {
119 &dev_attr_type.attr,
120 &dev_attr_online.attr,
121 NULL,
122};
123
124static struct attribute_group zcrypt_device_attr_group = {
125 .attrs = zcrypt_device_attrs,
126};
127 86
128/** 87/**
129 * Process a rescan of the transport layer. 88 * Process a rescan of the transport layer.
@@ -136,242 +95,34 @@ static inline int zcrypt_process_rescan(void)
136 atomic_set(&zcrypt_rescan_req, 0); 95 atomic_set(&zcrypt_rescan_req, 0);
137 atomic_inc(&zcrypt_rescan_count); 96 atomic_inc(&zcrypt_rescan_count);
138 ap_bus_force_rescan(); 97 ap_bus_force_rescan();
139 ZCRYPT_DBF_COMMON(DBF_INFO, "rescan%07d", 98 ZCRYPT_DBF(DBF_INFO, "rescan count=%07d",
140 atomic_inc_return(&zcrypt_rescan_count)); 99 atomic_inc_return(&zcrypt_rescan_count));
141 return 1; 100 return 1;
142 } 101 }
143 return 0; 102 return 0;
144} 103}
145 104
146/**
147 * __zcrypt_increase_preference(): Increase preference of a crypto device.
148 * @zdev: Pointer the crypto device
149 *
150 * Move the device towards the head of the device list.
151 * Need to be called while holding the zcrypt device list lock.
152 * Note: cards with speed_rating of 0 are kept at the end of the list.
153 */
154static void __zcrypt_increase_preference(struct zcrypt_device *zdev)
155{
156 struct zcrypt_device *tmp;
157 struct list_head *l;
158
159 if (zdev->speed_rating == 0)
160 return;
161 for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) {
162 tmp = list_entry(l, struct zcrypt_device, list);
163 if ((tmp->request_count + 1) * tmp->speed_rating <=
164 (zdev->request_count + 1) * zdev->speed_rating &&
165 tmp->speed_rating != 0)
166 break;
167 }
168 if (l == zdev->list.prev)
169 return;
170 /* Move zdev behind l */
171 list_move(&zdev->list, l);
172}
173
174/**
175 * __zcrypt_decrease_preference(): Decrease preference of a crypto device.
176 * @zdev: Pointer to a crypto device.
177 *
178 * Move the device towards the tail of the device list.
179 * Need to be called while holding the zcrypt device list lock.
180 * Note: cards with speed_rating of 0 are kept at the end of the list.
181 */
182static void __zcrypt_decrease_preference(struct zcrypt_device *zdev)
183{
184 struct zcrypt_device *tmp;
185 struct list_head *l;
186
187 if (zdev->speed_rating == 0)
188 return;
189 for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) {
190 tmp = list_entry(l, struct zcrypt_device, list);
191 if ((tmp->request_count + 1) * tmp->speed_rating >
192 (zdev->request_count + 1) * zdev->speed_rating ||
193 tmp->speed_rating == 0)
194 break;
195 }
196 if (l == zdev->list.next)
197 return;
198 /* Move zdev before l */
199 list_move_tail(&zdev->list, l);
200}
201
202static void zcrypt_device_release(struct kref *kref)
203{
204 struct zcrypt_device *zdev =
205 container_of(kref, struct zcrypt_device, refcount);
206 zcrypt_device_free(zdev);
207}
208
209void zcrypt_device_get(struct zcrypt_device *zdev)
210{
211 kref_get(&zdev->refcount);
212}
213EXPORT_SYMBOL(zcrypt_device_get);
214
215int zcrypt_device_put(struct zcrypt_device *zdev)
216{
217 return kref_put(&zdev->refcount, zcrypt_device_release);
218}
219EXPORT_SYMBOL(zcrypt_device_put);
220
221struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size)
222{
223 struct zcrypt_device *zdev;
224
225 zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL);
226 if (!zdev)
227 return NULL;
228 zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL);
229 if (!zdev->reply.message)
230 goto out_free;
231 zdev->reply.length = max_response_size;
232 spin_lock_init(&zdev->lock);
233 INIT_LIST_HEAD(&zdev->list);
234 zdev->dbf_area = zcrypt_dbf_devices;
235 return zdev;
236
237out_free:
238 kfree(zdev);
239 return NULL;
240}
241EXPORT_SYMBOL(zcrypt_device_alloc);
242
243void zcrypt_device_free(struct zcrypt_device *zdev)
244{
245 kfree(zdev->reply.message);
246 kfree(zdev);
247}
248EXPORT_SYMBOL(zcrypt_device_free);
249
250/**
251 * zcrypt_device_register() - Register a crypto device.
252 * @zdev: Pointer to a crypto device
253 *
254 * Register a crypto device. Returns 0 if successful.
255 */
256int zcrypt_device_register(struct zcrypt_device *zdev)
257{
258 int rc;
259
260 if (!zdev->ops)
261 return -ENODEV;
262 rc = sysfs_create_group(&zdev->ap_dev->device.kobj,
263 &zcrypt_device_attr_group);
264 if (rc)
265 goto out;
266 get_device(&zdev->ap_dev->device);
267 kref_init(&zdev->refcount);
268 spin_lock_bh(&zcrypt_device_lock);
269 zdev->online = 1; /* New devices are online by default. */
270 ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dreg", zdev->ap_dev->qid,
271 zdev->online);
272 list_add_tail(&zdev->list, &zcrypt_device_list);
273 __zcrypt_increase_preference(zdev);
274 zcrypt_device_count++;
275 spin_unlock_bh(&zcrypt_device_lock);
276 if (zdev->ops->rng) {
277 rc = zcrypt_rng_device_add();
278 if (rc)
279 goto out_unregister;
280 }
281 return 0;
282
283out_unregister:
284 spin_lock_bh(&zcrypt_device_lock);
285 zcrypt_device_count--;
286 list_del_init(&zdev->list);
287 spin_unlock_bh(&zcrypt_device_lock);
288 sysfs_remove_group(&zdev->ap_dev->device.kobj,
289 &zcrypt_device_attr_group);
290 put_device(&zdev->ap_dev->device);
291 zcrypt_device_put(zdev);
292out:
293 return rc;
294}
295EXPORT_SYMBOL(zcrypt_device_register);
296
297/**
298 * zcrypt_device_unregister(): Unregister a crypto device.
299 * @zdev: Pointer to crypto device
300 *
301 * Unregister a crypto device.
302 */
303void zcrypt_device_unregister(struct zcrypt_device *zdev)
304{
305 if (zdev->ops->rng)
306 zcrypt_rng_device_remove();
307 spin_lock_bh(&zcrypt_device_lock);
308 zcrypt_device_count--;
309 list_del_init(&zdev->list);
310 spin_unlock_bh(&zcrypt_device_lock);
311 sysfs_remove_group(&zdev->ap_dev->device.kobj,
312 &zcrypt_device_attr_group);
313 put_device(&zdev->ap_dev->device);
314 zcrypt_device_put(zdev);
315}
316EXPORT_SYMBOL(zcrypt_device_unregister);
317
318void zcrypt_msgtype_register(struct zcrypt_ops *zops) 105void zcrypt_msgtype_register(struct zcrypt_ops *zops)
319{ 106{
320 spin_lock_bh(&zcrypt_ops_list_lock);
321 list_add_tail(&zops->list, &zcrypt_ops_list); 107 list_add_tail(&zops->list, &zcrypt_ops_list);
322 spin_unlock_bh(&zcrypt_ops_list_lock);
323} 108}
324EXPORT_SYMBOL(zcrypt_msgtype_register);
325 109
326void zcrypt_msgtype_unregister(struct zcrypt_ops *zops) 110void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
327{ 111{
328 spin_lock_bh(&zcrypt_ops_list_lock);
329 list_del_init(&zops->list); 112 list_del_init(&zops->list);
330 spin_unlock_bh(&zcrypt_ops_list_lock);
331} 113}
332EXPORT_SYMBOL(zcrypt_msgtype_unregister);
333 114
334static inline 115struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
335struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant)
336{ 116{
337 struct zcrypt_ops *zops; 117 struct zcrypt_ops *zops;
338 int found = 0;
339 118
340 spin_lock_bh(&zcrypt_ops_list_lock); 119 list_for_each_entry(zops, &zcrypt_ops_list, list)
341 list_for_each_entry(zops, &zcrypt_ops_list, list) {
342 if ((zops->variant == variant) && 120 if ((zops->variant == variant) &&
343 (!strncmp(zops->name, name, sizeof(zops->name)))) { 121 (!strncmp(zops->name, name, sizeof(zops->name))))
344 found = 1; 122 return zops;
345 break; 123 return NULL;
346 }
347 }
348 if (!found || !try_module_get(zops->owner))
349 zops = NULL;
350
351 spin_unlock_bh(&zcrypt_ops_list_lock);
352
353 return zops;
354}
355
356struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *name, int variant)
357{
358 struct zcrypt_ops *zops = NULL;
359
360 zops = __ops_lookup(name, variant);
361 if (!zops) {
362 request_module("%s", name);
363 zops = __ops_lookup(name, variant);
364 }
365 return zops;
366}
367EXPORT_SYMBOL(zcrypt_msgtype_request);
368
369void zcrypt_msgtype_release(struct zcrypt_ops *zops)
370{
371 if (zops)
372 module_put(zops->owner);
373} 124}
374EXPORT_SYMBOL(zcrypt_msgtype_release); 125EXPORT_SYMBOL(zcrypt_msgtype);
375 126
376/** 127/**
377 * zcrypt_read (): Not supported beyond zcrypt 1.3.1. 128 * zcrypt_read (): Not supported beyond zcrypt 1.3.1.
@@ -417,16 +168,80 @@ static int zcrypt_release(struct inode *inode, struct file *filp)
417 return 0; 168 return 0;
418} 169}
419 170
171static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
172 struct zcrypt_queue *zq,
173 unsigned int weight)
174{
175 if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
176 return NULL;
177 zcrypt_queue_get(zq);
178 get_device(&zq->queue->ap_dev.device);
179 atomic_add(weight, &zc->load);
180 atomic_add(weight, &zq->load);
181 zq->request_count++;
182 return zq;
183}
184
185static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
186 struct zcrypt_queue *zq,
187 unsigned int weight)
188{
189 struct module *mod = zq->queue->ap_dev.drv->driver.owner;
190
191 zq->request_count--;
192 atomic_sub(weight, &zc->load);
193 atomic_sub(weight, &zq->load);
194 put_device(&zq->queue->ap_dev.device);
195 zcrypt_queue_put(zq);
196 module_put(mod);
197}
198
199static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
200 struct zcrypt_card *pref_zc,
201 unsigned weight, unsigned pref_weight)
202{
203 if (!pref_zc)
204 return 0;
205 weight += atomic_read(&zc->load);
206 pref_weight += atomic_read(&pref_zc->load);
207 if (weight == pref_weight)
208 return atomic_read(&zc->card->total_request_count) >
209 atomic_read(&pref_zc->card->total_request_count);
210 return weight > pref_weight;
211}
212
213static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
214 struct zcrypt_queue *pref_zq,
215 unsigned weight, unsigned pref_weight)
216{
217 if (!pref_zq)
218 return 0;
219 weight += atomic_read(&zq->load);
220 pref_weight += atomic_read(&pref_zq->load);
221 if (weight == pref_weight)
222 return &zq->queue->total_request_count >
223 &pref_zq->queue->total_request_count;
224 return weight > pref_weight;
225}
226
420/* 227/*
421 * zcrypt ioctls. 228 * zcrypt ioctls.
422 */ 229 */
423static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex) 230static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
424{ 231{
425 struct zcrypt_device *zdev; 232 struct zcrypt_card *zc, *pref_zc;
426 int rc; 233 struct zcrypt_queue *zq, *pref_zq;
234 unsigned int weight, pref_weight;
235 unsigned int func_code;
236 int qid = 0, rc = -ENODEV;
237
238 trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
239
240 if (mex->outputdatalength < mex->inputdatalength) {
241 rc = -EINVAL;
242 goto out;
243 }
427 244
428 if (mex->outputdatalength < mex->inputdatalength)
429 return -EINVAL;
430 /* 245 /*
431 * As long as outputdatalength is big enough, we can set the 246 * As long as outputdatalength is big enough, we can set the
432 * outputdatalength equal to the inputdatalength, since that is the 247 * outputdatalength equal to the inputdatalength, since that is the
@@ -434,44 +249,73 @@ static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
434 */ 249 */
435 mex->outputdatalength = mex->inputdatalength; 250 mex->outputdatalength = mex->inputdatalength;
436 251
437 spin_lock_bh(&zcrypt_device_lock); 252 rc = get_rsa_modex_fc(mex, &func_code);
438 list_for_each_entry(zdev, &zcrypt_device_list, list) { 253 if (rc)
439 if (!zdev->online || 254 goto out;
440 !zdev->ops->rsa_modexpo || 255
441 zdev->min_mod_size > mex->inputdatalength || 256 pref_zc = NULL;
442 zdev->max_mod_size < mex->inputdatalength) 257 pref_zq = NULL;
258 spin_lock(&zcrypt_list_lock);
259 for_each_zcrypt_card(zc) {
260 /* Check for online accelarator and CCA cards */
261 if (!zc->online || !(zc->card->functions & 0x18000000))
262 continue;
263 /* Check for size limits */
264 if (zc->min_mod_size > mex->inputdatalength ||
265 zc->max_mod_size < mex->inputdatalength)
266 continue;
267 /* get weight index of the card device */
268 weight = zc->speed_rating[func_code];
269 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
443 continue; 270 continue;
444 zcrypt_device_get(zdev); 271 for_each_zcrypt_queue(zq, zc) {
445 get_device(&zdev->ap_dev->device); 272 /* check if device is online and eligible */
446 zdev->request_count++; 273 if (!zq->online || !zq->ops->rsa_modexpo)
447 __zcrypt_decrease_preference(zdev); 274 continue;
448 if (try_module_get(zdev->ap_dev->drv->driver.owner)) { 275 if (zcrypt_queue_compare(zq, pref_zq,
449 spin_unlock_bh(&zcrypt_device_lock); 276 weight, pref_weight))
450 rc = zdev->ops->rsa_modexpo(zdev, mex); 277 continue;
451 spin_lock_bh(&zcrypt_device_lock); 278 pref_zc = zc;
452 module_put(zdev->ap_dev->drv->driver.owner); 279 pref_zq = zq;
280 pref_weight = weight;
453 } 281 }
454 else
455 rc = -EAGAIN;
456 zdev->request_count--;
457 __zcrypt_increase_preference(zdev);
458 put_device(&zdev->ap_dev->device);
459 zcrypt_device_put(zdev);
460 spin_unlock_bh(&zcrypt_device_lock);
461 return rc;
462 } 282 }
463 spin_unlock_bh(&zcrypt_device_lock); 283 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
464 return -ENODEV; 284 spin_unlock(&zcrypt_list_lock);
285
286 if (!pref_zq) {
287 rc = -ENODEV;
288 goto out;
289 }
290
291 qid = pref_zq->queue->qid;
292 rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);
293
294 spin_lock(&zcrypt_list_lock);
295 zcrypt_drop_queue(pref_zc, pref_zq, weight);
296 spin_unlock(&zcrypt_list_lock);
297
298out:
299 trace_s390_zcrypt_rep(mex, func_code, rc,
300 AP_QID_CARD(qid), AP_QID_QUEUE(qid));
301 return rc;
465} 302}
466 303
467static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt) 304static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
468{ 305{
469 struct zcrypt_device *zdev; 306 struct zcrypt_card *zc, *pref_zc;
470 unsigned long long z1, z2, z3; 307 struct zcrypt_queue *zq, *pref_zq;
471 int rc, copied; 308 unsigned int weight, pref_weight;
309 unsigned int func_code;
310 int qid = 0, rc = -ENODEV;
311
312 trace_s390_zcrypt_req(crt, TP_ICARSACRT);
313
314 if (crt->outputdatalength < crt->inputdatalength) {
315 rc = -EINVAL;
316 goto out;
317 }
472 318
473 if (crt->outputdatalength < crt->inputdatalength)
474 return -EINVAL;
475 /* 319 /*
476 * As long as outputdatalength is big enough, we can set the 320 * As long as outputdatalength is big enough, we can set the
477 * outputdatalength equal to the inputdatalength, since that is the 321 * outputdatalength equal to the inputdatalength, since that is the
@@ -479,308 +323,445 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
479 */ 323 */
480 crt->outputdatalength = crt->inputdatalength; 324 crt->outputdatalength = crt->inputdatalength;
481 325
482 copied = 0; 326 rc = get_rsa_crt_fc(crt, &func_code);
483 restart: 327 if (rc)
484 spin_lock_bh(&zcrypt_device_lock); 328 goto out;
485 list_for_each_entry(zdev, &zcrypt_device_list, list) { 329
486 if (!zdev->online || 330 pref_zc = NULL;
487 !zdev->ops->rsa_modexpo_crt || 331 pref_zq = NULL;
488 zdev->min_mod_size > crt->inputdatalength || 332 spin_lock(&zcrypt_list_lock);
489 zdev->max_mod_size < crt->inputdatalength) 333 for_each_zcrypt_card(zc) {
334 /* Check for online accelarator and CCA cards */
335 if (!zc->online || !(zc->card->functions & 0x18000000))
336 continue;
337 /* Check for size limits */
338 if (zc->min_mod_size > crt->inputdatalength ||
339 zc->max_mod_size < crt->inputdatalength)
490 continue; 340 continue;
491 if (zdev->short_crt && crt->inputdatalength > 240) { 341 /* get weight index of the card device */
492 /* 342 weight = zc->speed_rating[func_code];
493 * Check inputdata for leading zeros for cards 343 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
494 * that can't handle np_prime, bp_key, or 344 continue;
495 * u_mult_inv > 128 bytes. 345 for_each_zcrypt_queue(zq, zc) {
496 */ 346 /* check if device is online and eligible */
497 if (copied == 0) { 347 if (!zq->online || !zq->ops->rsa_modexpo_crt)
498 unsigned int len;
499 spin_unlock_bh(&zcrypt_device_lock);
500 /* len is max 256 / 2 - 120 = 8
501 * For bigger device just assume len of leading
502 * 0s is 8 as stated in the requirements for
503 * ica_rsa_modexpo_crt struct in zcrypt.h.
504 */
505 if (crt->inputdatalength <= 256)
506 len = crt->inputdatalength / 2 - 120;
507 else
508 len = 8;
509 if (len > sizeof(z1))
510 return -EFAULT;
511 z1 = z2 = z3 = 0;
512 if (copy_from_user(&z1, crt->np_prime, len) ||
513 copy_from_user(&z2, crt->bp_key, len) ||
514 copy_from_user(&z3, crt->u_mult_inv, len))
515 return -EFAULT;
516 z1 = z2 = z3 = 0;
517 copied = 1;
518 /*
519 * We have to restart device lookup -
520 * the device list may have changed by now.
521 */
522 goto restart;
523 }
524 if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL)
525 /* The device can't handle this request. */
526 continue; 348 continue;
349 if (zcrypt_queue_compare(zq, pref_zq,
350 weight, pref_weight))
351 continue;
352 pref_zc = zc;
353 pref_zq = zq;
354 pref_weight = weight;
527 } 355 }
528 zcrypt_device_get(zdev);
529 get_device(&zdev->ap_dev->device);
530 zdev->request_count++;
531 __zcrypt_decrease_preference(zdev);
532 if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
533 spin_unlock_bh(&zcrypt_device_lock);
534 rc = zdev->ops->rsa_modexpo_crt(zdev, crt);
535 spin_lock_bh(&zcrypt_device_lock);
536 module_put(zdev->ap_dev->drv->driver.owner);
537 }
538 else
539 rc = -EAGAIN;
540 zdev->request_count--;
541 __zcrypt_increase_preference(zdev);
542 put_device(&zdev->ap_dev->device);
543 zcrypt_device_put(zdev);
544 spin_unlock_bh(&zcrypt_device_lock);
545 return rc;
546 } 356 }
547 spin_unlock_bh(&zcrypt_device_lock); 357 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
548 return -ENODEV; 358 spin_unlock(&zcrypt_list_lock);
359
360 if (!pref_zq) {
361 rc = -ENODEV;
362 goto out;
363 }
364
365 qid = pref_zq->queue->qid;
366 rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);
367
368 spin_lock(&zcrypt_list_lock);
369 zcrypt_drop_queue(pref_zc, pref_zq, weight);
370 spin_unlock(&zcrypt_list_lock);
371
372out:
373 trace_s390_zcrypt_rep(crt, func_code, rc,
374 AP_QID_CARD(qid), AP_QID_QUEUE(qid));
375 return rc;
549} 376}
550 377
551static long zcrypt_send_cprb(struct ica_xcRB *xcRB) 378static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
552{ 379{
553 struct zcrypt_device *zdev; 380 struct zcrypt_card *zc, *pref_zc;
554 int rc; 381 struct zcrypt_queue *zq, *pref_zq;
382 struct ap_message ap_msg;
383 unsigned int weight, pref_weight;
384 unsigned int func_code;
385 unsigned short *domain;
386 int qid = 0, rc = -ENODEV;
387
388 trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
555 389
556 spin_lock_bh(&zcrypt_device_lock); 390 rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
557 list_for_each_entry(zdev, &zcrypt_device_list, list) { 391 if (rc)
558 if (!zdev->online || !zdev->ops->send_cprb || 392 goto out;
559 (zdev->ops->variant == MSGTYPE06_VARIANT_EP11) || 393
560 (xcRB->user_defined != AUTOSELECT && 394 pref_zc = NULL;
561 AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined)) 395 pref_zq = NULL;
396 spin_lock(&zcrypt_list_lock);
397 for_each_zcrypt_card(zc) {
398 /* Check for online CCA cards */
399 if (!zc->online || !(zc->card->functions & 0x10000000))
400 continue;
401 /* Check for user selected CCA card */
402 if (xcRB->user_defined != AUTOSELECT &&
403 xcRB->user_defined != zc->card->id)
562 continue; 404 continue;
563 zcrypt_device_get(zdev); 405 /* get weight index of the card device */
564 get_device(&zdev->ap_dev->device); 406 weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
565 zdev->request_count++; 407 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
566 __zcrypt_decrease_preference(zdev); 408 continue;
567 if (try_module_get(zdev->ap_dev->drv->driver.owner)) { 409 for_each_zcrypt_queue(zq, zc) {
568 spin_unlock_bh(&zcrypt_device_lock); 410 /* check if device is online and eligible */
569 rc = zdev->ops->send_cprb(zdev, xcRB); 411 if (!zq->online ||
570 spin_lock_bh(&zcrypt_device_lock); 412 !zq->ops->send_cprb ||
571 module_put(zdev->ap_dev->drv->driver.owner); 413 ((*domain != (unsigned short) AUTOSELECT) &&
414 (*domain != AP_QID_QUEUE(zq->queue->qid))))
415 continue;
416 if (zcrypt_queue_compare(zq, pref_zq,
417 weight, pref_weight))
418 continue;
419 pref_zc = zc;
420 pref_zq = zq;
421 pref_weight = weight;
572 } 422 }
573 else
574 rc = -EAGAIN;
575 zdev->request_count--;
576 __zcrypt_increase_preference(zdev);
577 put_device(&zdev->ap_dev->device);
578 zcrypt_device_put(zdev);
579 spin_unlock_bh(&zcrypt_device_lock);
580 return rc;
581 } 423 }
582 spin_unlock_bh(&zcrypt_device_lock); 424 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
583 return -ENODEV; 425 spin_unlock(&zcrypt_list_lock);
584}
585 426
586struct ep11_target_dev_list { 427 if (!pref_zq) {
587 unsigned short targets_num; 428 rc = -ENODEV;
588 struct ep11_target_dev *targets; 429 goto out;
589}; 430 }
431
432 /* in case of auto select, provide the correct domain */
433 qid = pref_zq->queue->qid;
434 if (*domain == (unsigned short) AUTOSELECT)
435 *domain = AP_QID_QUEUE(qid);
590 436
591static bool is_desired_ep11dev(unsigned int dev_qid, 437 rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
592 struct ep11_target_dev_list dev_list) 438
439 spin_lock(&zcrypt_list_lock);
440 zcrypt_drop_queue(pref_zc, pref_zq, weight);
441 spin_unlock(&zcrypt_list_lock);
442
443out:
444 trace_s390_zcrypt_rep(xcRB, func_code, rc,
445 AP_QID_CARD(qid), AP_QID_QUEUE(qid));
446 return rc;
447}
448
449static bool is_desired_ep11_card(unsigned int dev_id,
450 unsigned short target_num,
451 struct ep11_target_dev *targets)
593{ 452{
594 int n; 453 while (target_num-- > 0) {
454 if (dev_id == targets->ap_id)
455 return true;
456 targets++;
457 }
458 return false;
459}
595 460
596 for (n = 0; n < dev_list.targets_num; n++, dev_list.targets++) { 461static bool is_desired_ep11_queue(unsigned int dev_qid,
597 if ((AP_QID_DEVICE(dev_qid) == dev_list.targets->ap_id) && 462 unsigned short target_num,
598 (AP_QID_QUEUE(dev_qid) == dev_list.targets->dom_id)) { 463 struct ep11_target_dev *targets)
464{
465 while (target_num-- > 0) {
466 if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
599 return true; 467 return true;
600 } 468 targets++;
601 } 469 }
602 return false; 470 return false;
603} 471}
604 472
605static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) 473static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
606{ 474{
607 struct zcrypt_device *zdev; 475 struct zcrypt_card *zc, *pref_zc;
608 bool autoselect = false; 476 struct zcrypt_queue *zq, *pref_zq;
609 int rc; 477 struct ep11_target_dev *targets;
610 struct ep11_target_dev_list ep11_dev_list = { 478 unsigned short target_num;
611 .targets_num = 0x00, 479 unsigned int weight, pref_weight;
612 .targets = NULL, 480 unsigned int func_code;
613 }; 481 struct ap_message ap_msg;
482 int qid = 0, rc = -ENODEV;
483
484 trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
614 485
615 ep11_dev_list.targets_num = (unsigned short) xcrb->targets_num; 486 target_num = (unsigned short) xcrb->targets_num;
616 487
617 /* empty list indicates autoselect (all available targets) */ 488 /* empty list indicates autoselect (all available targets) */
618 if (ep11_dev_list.targets_num == 0) 489 targets = NULL;
619 autoselect = true; 490 if (target_num != 0) {
620 else { 491 struct ep11_target_dev __user *uptr;
621 ep11_dev_list.targets = kcalloc((unsigned short)
622 xcrb->targets_num,
623 sizeof(struct ep11_target_dev),
624 GFP_KERNEL);
625 if (!ep11_dev_list.targets)
626 return -ENOMEM;
627 492
628 if (copy_from_user(ep11_dev_list.targets, 493 targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
629 (struct ep11_target_dev __force __user *) 494 if (!targets) {
630 xcrb->targets, xcrb->targets_num * 495 rc = -ENOMEM;
631 sizeof(struct ep11_target_dev))) 496 goto out;
632 return -EFAULT; 497 }
498
499 uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
500 if (copy_from_user(targets, uptr,
501 target_num * sizeof(*targets))) {
502 rc = -EFAULT;
503 goto out;
504 }
633 } 505 }
634 506
635 spin_lock_bh(&zcrypt_device_lock); 507 rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code);
636 list_for_each_entry(zdev, &zcrypt_device_list, list) { 508 if (rc)
637 /* check if device is eligible */ 509 goto out_free;
638 if (!zdev->online ||
639 zdev->ops->variant != MSGTYPE06_VARIANT_EP11)
640 continue;
641 510
642 /* check if device is selected as valid target */ 511 pref_zc = NULL;
643 if (!is_desired_ep11dev(zdev->ap_dev->qid, ep11_dev_list) && 512 pref_zq = NULL;
644 !autoselect) 513 spin_lock(&zcrypt_list_lock);
514 for_each_zcrypt_card(zc) {
515 /* Check for online EP11 cards */
516 if (!zc->online || !(zc->card->functions & 0x04000000))
517 continue;
518 /* Check for user selected EP11 card */
519 if (targets &&
520 !is_desired_ep11_card(zc->card->id, target_num, targets))
645 continue; 521 continue;
522 /* get weight index of the card device */
523 weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
524 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
525 continue;
526 for_each_zcrypt_queue(zq, zc) {
527 /* check if device is online and eligible */
528 if (!zq->online ||
529 !zq->ops->send_ep11_cprb ||
530 (targets &&
531 !is_desired_ep11_queue(zq->queue->qid,
532 target_num, targets)))
533 continue;
534 if (zcrypt_queue_compare(zq, pref_zq,
535 weight, pref_weight))
536 continue;
537 pref_zc = zc;
538 pref_zq = zq;
539 pref_weight = weight;
540 }
541 }
542 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
543 spin_unlock(&zcrypt_list_lock);
646 544
647 zcrypt_device_get(zdev); 545 if (!pref_zq) {
648 get_device(&zdev->ap_dev->device); 546 rc = -ENODEV;
649 zdev->request_count++; 547 goto out_free;
650 __zcrypt_decrease_preference(zdev);
651 if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
652 spin_unlock_bh(&zcrypt_device_lock);
653 rc = zdev->ops->send_ep11_cprb(zdev, xcrb);
654 spin_lock_bh(&zcrypt_device_lock);
655 module_put(zdev->ap_dev->drv->driver.owner);
656 } else {
657 rc = -EAGAIN;
658 }
659 zdev->request_count--;
660 __zcrypt_increase_preference(zdev);
661 put_device(&zdev->ap_dev->device);
662 zcrypt_device_put(zdev);
663 spin_unlock_bh(&zcrypt_device_lock);
664 return rc;
665 } 548 }
666 spin_unlock_bh(&zcrypt_device_lock); 549
667 return -ENODEV; 550 qid = pref_zq->queue->qid;
551 rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
552
553 spin_lock(&zcrypt_list_lock);
554 zcrypt_drop_queue(pref_zc, pref_zq, weight);
555 spin_unlock(&zcrypt_list_lock);
556
557out_free:
558 kfree(targets);
559out:
560 trace_s390_zcrypt_rep(xcrb, func_code, rc,
561 AP_QID_CARD(qid), AP_QID_QUEUE(qid));
562 return rc;
668} 563}
669 564
670static long zcrypt_rng(char *buffer) 565static long zcrypt_rng(char *buffer)
671{ 566{
672 struct zcrypt_device *zdev; 567 struct zcrypt_card *zc, *pref_zc;
673 int rc; 568 struct zcrypt_queue *zq, *pref_zq;
569 unsigned int weight, pref_weight;
570 unsigned int func_code;
571 struct ap_message ap_msg;
572 unsigned int domain;
573 int qid = 0, rc = -ENODEV;
574
575 trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
674 576
675 spin_lock_bh(&zcrypt_device_lock); 577 rc = get_rng_fc(&ap_msg, &func_code, &domain);
676 list_for_each_entry(zdev, &zcrypt_device_list, list) { 578 if (rc)
677 if (!zdev->online || !zdev->ops->rng) 579 goto out;
580
581 pref_zc = NULL;
582 pref_zq = NULL;
583 spin_lock(&zcrypt_list_lock);
584 for_each_zcrypt_card(zc) {
585 /* Check for online CCA cards */
586 if (!zc->online || !(zc->card->functions & 0x10000000))
678 continue; 587 continue;
679 zcrypt_device_get(zdev); 588 /* get weight index of the card device */
680 get_device(&zdev->ap_dev->device); 589 weight = zc->speed_rating[func_code];
681 zdev->request_count++; 590 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
682 __zcrypt_decrease_preference(zdev); 591 continue;
683 if (try_module_get(zdev->ap_dev->drv->driver.owner)) { 592 for_each_zcrypt_queue(zq, zc) {
684 spin_unlock_bh(&zcrypt_device_lock); 593 /* check if device is online and eligible */
685 rc = zdev->ops->rng(zdev, buffer); 594 if (!zq->online || !zq->ops->rng)
686 spin_lock_bh(&zcrypt_device_lock); 595 continue;
687 module_put(zdev->ap_dev->drv->driver.owner); 596 if (zcrypt_queue_compare(zq, pref_zq,
688 } else 597 weight, pref_weight))
689 rc = -EAGAIN; 598 continue;
690 zdev->request_count--; 599 pref_zc = zc;
691 __zcrypt_increase_preference(zdev); 600 pref_zq = zq;
692 put_device(&zdev->ap_dev->device); 601 pref_weight = weight;
693 zcrypt_device_put(zdev); 602 }
694 spin_unlock_bh(&zcrypt_device_lock); 603 }
695 return rc; 604 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
605 spin_unlock(&zcrypt_list_lock);
606
607 if (!pref_zq)
608 return -ENODEV;
609
610 qid = pref_zq->queue->qid;
611 rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
612
613 spin_lock(&zcrypt_list_lock);
614 zcrypt_drop_queue(pref_zc, pref_zq, weight);
615 spin_unlock(&zcrypt_list_lock);
616
617out:
618 trace_s390_zcrypt_rep(buffer, func_code, rc,
619 AP_QID_CARD(qid), AP_QID_QUEUE(qid));
620 return rc;
621}
622
623static void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
624{
625 struct zcrypt_card *zc;
626 struct zcrypt_queue *zq;
627 struct zcrypt_device_status *stat;
628
629 memset(matrix, 0, sizeof(*matrix));
630 spin_lock(&zcrypt_list_lock);
631 for_each_zcrypt_card(zc) {
632 for_each_zcrypt_queue(zq, zc) {
633 stat = matrix->device;
634 stat += AP_QID_CARD(zq->queue->qid) * MAX_ZDEV_DOMAINS;
635 stat += AP_QID_QUEUE(zq->queue->qid);
636 stat->hwtype = zc->card->ap_dev.device_type;
637 stat->functions = zc->card->functions >> 26;
638 stat->qid = zq->queue->qid;
639 stat->online = zq->online ? 0x01 : 0x00;
640 }
696 } 641 }
697 spin_unlock_bh(&zcrypt_device_lock); 642 spin_unlock(&zcrypt_list_lock);
698 return -ENODEV;
699} 643}
644EXPORT_SYMBOL(zcrypt_device_status_mask);
700 645
701static void zcrypt_status_mask(char status[AP_DEVICES]) 646static void zcrypt_status_mask(char status[AP_DEVICES])
702{ 647{
703 struct zcrypt_device *zdev; 648 struct zcrypt_card *zc;
649 struct zcrypt_queue *zq;
704 650
705 memset(status, 0, sizeof(char) * AP_DEVICES); 651 memset(status, 0, sizeof(char) * AP_DEVICES);
706 spin_lock_bh(&zcrypt_device_lock); 652 spin_lock(&zcrypt_list_lock);
707 list_for_each_entry(zdev, &zcrypt_device_list, list) 653 for_each_zcrypt_card(zc) {
708 status[AP_QID_DEVICE(zdev->ap_dev->qid)] = 654 for_each_zcrypt_queue(zq, zc) {
709 zdev->online ? zdev->user_space_type : 0x0d; 655 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
710 spin_unlock_bh(&zcrypt_device_lock); 656 continue;
657 status[AP_QID_CARD(zq->queue->qid)] =
658 zc->online ? zc->user_space_type : 0x0d;
659 }
660 }
661 spin_unlock(&zcrypt_list_lock);
711} 662}
712 663
713static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES]) 664static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
714{ 665{
715 struct zcrypt_device *zdev; 666 struct zcrypt_card *zc;
667 struct zcrypt_queue *zq;
716 668
717 memset(qdepth, 0, sizeof(char) * AP_DEVICES); 669 memset(qdepth, 0, sizeof(char) * AP_DEVICES);
718 spin_lock_bh(&zcrypt_device_lock); 670 spin_lock(&zcrypt_list_lock);
719 list_for_each_entry(zdev, &zcrypt_device_list, list) { 671 for_each_zcrypt_card(zc) {
720 spin_lock(&zdev->ap_dev->lock); 672 for_each_zcrypt_queue(zq, zc) {
721 qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] = 673 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
722 zdev->ap_dev->pendingq_count + 674 continue;
723 zdev->ap_dev->requestq_count; 675 spin_lock(&zq->queue->lock);
724 spin_unlock(&zdev->ap_dev->lock); 676 qdepth[AP_QID_CARD(zq->queue->qid)] =
677 zq->queue->pendingq_count +
678 zq->queue->requestq_count;
679 spin_unlock(&zq->queue->lock);
680 }
725 } 681 }
726 spin_unlock_bh(&zcrypt_device_lock); 682 spin_unlock(&zcrypt_list_lock);
727} 683}
728 684
729static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES]) 685static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
730{ 686{
731 struct zcrypt_device *zdev; 687 struct zcrypt_card *zc;
688 struct zcrypt_queue *zq;
732 689
733 memset(reqcnt, 0, sizeof(int) * AP_DEVICES); 690 memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
734 spin_lock_bh(&zcrypt_device_lock); 691 spin_lock(&zcrypt_list_lock);
735 list_for_each_entry(zdev, &zcrypt_device_list, list) { 692 for_each_zcrypt_card(zc) {
736 spin_lock(&zdev->ap_dev->lock); 693 for_each_zcrypt_queue(zq, zc) {
737 reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] = 694 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
738 zdev->ap_dev->total_request_count; 695 continue;
739 spin_unlock(&zdev->ap_dev->lock); 696 spin_lock(&zq->queue->lock);
697 reqcnt[AP_QID_CARD(zq->queue->qid)] =
698 zq->queue->total_request_count;
699 spin_unlock(&zq->queue->lock);
700 }
740 } 701 }
741 spin_unlock_bh(&zcrypt_device_lock); 702 spin_unlock(&zcrypt_list_lock);
742} 703}
743 704
744static int zcrypt_pendingq_count(void) 705static int zcrypt_pendingq_count(void)
745{ 706{
746 struct zcrypt_device *zdev; 707 struct zcrypt_card *zc;
747 int pendingq_count = 0; 708 struct zcrypt_queue *zq;
748 709 int pendingq_count;
749 spin_lock_bh(&zcrypt_device_lock); 710
750 list_for_each_entry(zdev, &zcrypt_device_list, list) { 711 pendingq_count = 0;
751 spin_lock(&zdev->ap_dev->lock); 712 spin_lock(&zcrypt_list_lock);
752 pendingq_count += zdev->ap_dev->pendingq_count; 713 for_each_zcrypt_card(zc) {
753 spin_unlock(&zdev->ap_dev->lock); 714 for_each_zcrypt_queue(zq, zc) {
715 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
716 continue;
717 spin_lock(&zq->queue->lock);
718 pendingq_count += zq->queue->pendingq_count;
719 spin_unlock(&zq->queue->lock);
720 }
754 } 721 }
755 spin_unlock_bh(&zcrypt_device_lock); 722 spin_unlock(&zcrypt_list_lock);
756 return pendingq_count; 723 return pendingq_count;
757} 724}
758 725
759static int zcrypt_requestq_count(void) 726static int zcrypt_requestq_count(void)
760{ 727{
761 struct zcrypt_device *zdev; 728 struct zcrypt_card *zc;
762 int requestq_count = 0; 729 struct zcrypt_queue *zq;
763 730 int requestq_count;
764 spin_lock_bh(&zcrypt_device_lock); 731
765 list_for_each_entry(zdev, &zcrypt_device_list, list) { 732 requestq_count = 0;
766 spin_lock(&zdev->ap_dev->lock); 733 spin_lock(&zcrypt_list_lock);
767 requestq_count += zdev->ap_dev->requestq_count; 734 for_each_zcrypt_card(zc) {
768 spin_unlock(&zdev->ap_dev->lock); 735 for_each_zcrypt_queue(zq, zc) {
736 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
737 continue;
738 spin_lock(&zq->queue->lock);
739 requestq_count += zq->queue->requestq_count;
740 spin_unlock(&zq->queue->lock);
741 }
769 } 742 }
770 spin_unlock_bh(&zcrypt_device_lock); 743 spin_unlock(&zcrypt_list_lock);
771 return requestq_count; 744 return requestq_count;
772} 745}
773 746
774static int zcrypt_count_type(int type) 747static int zcrypt_count_type(int type)
775{ 748{
776 struct zcrypt_device *zdev; 749 struct zcrypt_card *zc;
777 int device_count = 0; 750 struct zcrypt_queue *zq;
778 751 int device_count;
779 spin_lock_bh(&zcrypt_device_lock); 752
780 list_for_each_entry(zdev, &zcrypt_device_list, list) 753 device_count = 0;
781 if (zdev->user_space_type == type) 754 spin_lock(&zcrypt_list_lock);
755 for_each_zcrypt_card(zc) {
756 if (zc->card->id != type)
757 continue;
758 for_each_zcrypt_queue(zq, zc) {
759 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
760 continue;
782 device_count++; 761 device_count++;
783 spin_unlock_bh(&zcrypt_device_lock); 762 }
763 }
764 spin_unlock(&zcrypt_list_lock);
784 return device_count; 765 return device_count;
785} 766}
786 767
@@ -887,6 +868,25 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
887 return -EFAULT; 868 return -EFAULT;
888 return rc; 869 return rc;
889 } 870 }
871 case ZDEVICESTATUS: {
872 struct zcrypt_device_matrix *device_status;
873
874 device_status = kzalloc(sizeof(struct zcrypt_device_matrix),
875 GFP_KERNEL);
876 if (!device_status)
877 return -ENOMEM;
878
879 zcrypt_device_status_mask(device_status);
880
881 if (copy_to_user((char __user *) arg, device_status,
882 sizeof(struct zcrypt_device_matrix))) {
883 kfree(device_status);
884 return -EFAULT;
885 }
886
887 kfree(device_status);
888 return 0;
889 }
890 case Z90STAT_STATUS_MASK: { 890 case Z90STAT_STATUS_MASK: {
891 char status[AP_DEVICES]; 891 char status[AP_DEVICES];
892 zcrypt_status_mask(status); 892 zcrypt_status_mask(status);
@@ -1249,29 +1249,36 @@ static int zcrypt_proc_open(struct inode *inode, struct file *file)
1249 1249
1250static void zcrypt_disable_card(int index) 1250static void zcrypt_disable_card(int index)
1251{ 1251{
1252 struct zcrypt_device *zdev; 1252 struct zcrypt_card *zc;
1253 struct zcrypt_queue *zq;
1253 1254
1254 spin_lock_bh(&zcrypt_device_lock); 1255 spin_lock(&zcrypt_list_lock);
1255 list_for_each_entry(zdev, &zcrypt_device_list, list) 1256 for_each_zcrypt_card(zc) {
1256 if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) { 1257 for_each_zcrypt_queue(zq, zc) {
1257 zdev->online = 0; 1258 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
1258 ap_flush_queue(zdev->ap_dev); 1259 continue;
1259 break; 1260 zq->online = 0;
1261 ap_flush_queue(zq->queue);
1260 } 1262 }
1261 spin_unlock_bh(&zcrypt_device_lock); 1263 }
1264 spin_unlock(&zcrypt_list_lock);
1262} 1265}
1263 1266
1264static void zcrypt_enable_card(int index) 1267static void zcrypt_enable_card(int index)
1265{ 1268{
1266 struct zcrypt_device *zdev; 1269 struct zcrypt_card *zc;
1270 struct zcrypt_queue *zq;
1267 1271
1268 spin_lock_bh(&zcrypt_device_lock); 1272 spin_lock(&zcrypt_list_lock);
1269 list_for_each_entry(zdev, &zcrypt_device_list, list) 1273 for_each_zcrypt_card(zc) {
1270 if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) { 1274 for_each_zcrypt_queue(zq, zc) {
1271 zdev->online = 1; 1275 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
1272 break; 1276 continue;
1277 zq->online = 1;
1278 ap_flush_queue(zq->queue);
1273 } 1279 }
1274 spin_unlock_bh(&zcrypt_device_lock); 1280 }
1281 spin_unlock(&zcrypt_list_lock);
1275} 1282}
1276 1283
1277static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer, 1284static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer,
@@ -1369,7 +1376,7 @@ static struct hwrng zcrypt_rng_dev = {
1369 .quality = 990, 1376 .quality = 990,
1370}; 1377};
1371 1378
1372static int zcrypt_rng_device_add(void) 1379int zcrypt_rng_device_add(void)
1373{ 1380{
1374 int rc = 0; 1381 int rc = 0;
1375 1382
@@ -1399,7 +1406,7 @@ out:
1399 return rc; 1406 return rc;
1400} 1407}
1401 1408
1402static void zcrypt_rng_device_remove(void) 1409void zcrypt_rng_device_remove(void)
1403{ 1410{
1404 mutex_lock(&zcrypt_rng_mutex); 1411 mutex_lock(&zcrypt_rng_mutex);
1405 zcrypt_rng_device_count--; 1412 zcrypt_rng_device_count--;
@@ -1412,24 +1419,19 @@ static void zcrypt_rng_device_remove(void)
1412 1419
1413int __init zcrypt_debug_init(void) 1420int __init zcrypt_debug_init(void)
1414{ 1421{
1415 debugfs_root = debugfs_create_dir("zcrypt", NULL); 1422 zcrypt_dbf_root = debugfs_create_dir("zcrypt", NULL);
1416 1423 zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
1417 zcrypt_dbf_common = debug_register("zcrypt_common", 1, 1, 16); 1424 DBF_MAX_SPRINTF_ARGS * sizeof(long));
1418 debug_register_view(zcrypt_dbf_common, &debug_hex_ascii_view); 1425 debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
1419 debug_set_level(zcrypt_dbf_common, DBF_ERR); 1426 debug_set_level(zcrypt_dbf_info, DBF_ERR);
1420
1421 zcrypt_dbf_devices = debug_register("zcrypt_devices", 1, 1, 16);
1422 debug_register_view(zcrypt_dbf_devices, &debug_hex_ascii_view);
1423 debug_set_level(zcrypt_dbf_devices, DBF_ERR);
1424 1427
1425 return 0; 1428 return 0;
1426} 1429}
1427 1430
1428void zcrypt_debug_exit(void) 1431void zcrypt_debug_exit(void)
1429{ 1432{
1430 debugfs_remove(debugfs_root); 1433 debugfs_remove(zcrypt_dbf_root);
1431 debug_unregister(zcrypt_dbf_common); 1434 debug_unregister(zcrypt_dbf_info);
1432 debug_unregister(zcrypt_dbf_devices);
1433} 1435}
1434 1436
1435/** 1437/**
@@ -1453,12 +1455,15 @@ int __init zcrypt_api_init(void)
1453 goto out; 1455 goto out;
1454 1456
1455 /* Set up the proc file system */ 1457 /* Set up the proc file system */
1456 zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, &zcrypt_proc_fops); 1458 zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL,
1459 &zcrypt_proc_fops);
1457 if (!zcrypt_entry) { 1460 if (!zcrypt_entry) {
1458 rc = -ENOMEM; 1461 rc = -ENOMEM;
1459 goto out_misc; 1462 goto out_misc;
1460 } 1463 }
1461 1464
1465 zcrypt_msgtype6_init();
1466 zcrypt_msgtype50_init();
1462 return 0; 1467 return 0;
1463 1468
1464out_misc: 1469out_misc:
@@ -1472,10 +1477,12 @@ out:
1472 * 1477 *
1473 * The module termination code. 1478 * The module termination code.
1474 */ 1479 */
1475void zcrypt_api_exit(void) 1480void __exit zcrypt_api_exit(void)
1476{ 1481{
1477 remove_proc_entry("driver/z90crypt", NULL); 1482 remove_proc_entry("driver/z90crypt", NULL);
1478 misc_deregister(&zcrypt_misc_device); 1483 misc_deregister(&zcrypt_misc_device);
1484 zcrypt_msgtype6_exit();
1485 zcrypt_msgtype50_exit();
1479 zcrypt_debug_exit(); 1486 zcrypt_debug_exit();
1480} 1487}
1481 1488
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 38618f05ad92..274a59051534 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -84,57 +84,110 @@ struct ica_z90_status {
84 */ 84 */
85#define ZCRYPT_RNG_BUFFER_SIZE 4096 85#define ZCRYPT_RNG_BUFFER_SIZE 4096
86 86
87struct zcrypt_device; 87/*
88 * Identifier for Crypto Request Performance Index
89 */
90enum crypto_ops {
91 MEX_1K,
92 MEX_2K,
93 MEX_4K,
94 CRT_1K,
95 CRT_2K,
96 CRT_4K,
97 HWRNG,
98 SECKEY,
99 NUM_OPS
100};
101
102struct zcrypt_queue;
88 103
89struct zcrypt_ops { 104struct zcrypt_ops {
90 long (*rsa_modexpo)(struct zcrypt_device *, struct ica_rsa_modexpo *); 105 long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *);
91 long (*rsa_modexpo_crt)(struct zcrypt_device *, 106 long (*rsa_modexpo_crt)(struct zcrypt_queue *,
92 struct ica_rsa_modexpo_crt *); 107 struct ica_rsa_modexpo_crt *);
93 long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *); 108 long (*send_cprb)(struct zcrypt_queue *, struct ica_xcRB *,
94 long (*send_ep11_cprb)(struct zcrypt_device *, struct ep11_urb *); 109 struct ap_message *);
95 long (*rng)(struct zcrypt_device *, char *); 110 long (*send_ep11_cprb)(struct zcrypt_queue *, struct ep11_urb *,
111 struct ap_message *);
112 long (*rng)(struct zcrypt_queue *, char *, struct ap_message *);
96 struct list_head list; /* zcrypt ops list. */ 113 struct list_head list; /* zcrypt ops list. */
97 struct module *owner; 114 struct module *owner;
98 int variant; 115 int variant;
99 char name[128]; 116 char name[128];
100}; 117};
101 118
102struct zcrypt_device { 119struct zcrypt_card {
103 struct list_head list; /* Device list. */ 120 struct list_head list; /* Device list. */
104 spinlock_t lock; /* Per device lock. */ 121 struct list_head zqueues; /* List of zcrypt queues */
105 struct kref refcount; /* device refcounting */ 122 struct kref refcount; /* device refcounting */
106 struct ap_device *ap_dev; /* The "real" ap device. */ 123 struct ap_card *card; /* The "real" ap card device. */
107 struct zcrypt_ops *ops; /* Crypto operations. */
108 int online; /* User online/offline */ 124 int online; /* User online/offline */
109 125
110 int user_space_type; /* User space device id. */ 126 int user_space_type; /* User space device id. */
111 char *type_string; /* User space device name. */ 127 char *type_string; /* User space device name. */
112 int min_mod_size; /* Min number of bits. */ 128 int min_mod_size; /* Min number of bits. */
113 int max_mod_size; /* Max number of bits. */ 129 int max_mod_size; /* Max number of bits. */
114 int short_crt; /* Card has crt length restriction. */ 130 int max_exp_bit_length;
115 int speed_rating; /* Speed of the crypto device. */ 131 int speed_rating[NUM_OPS]; /* Speed idx of crypto ops. */
132 atomic_t load; /* Utilization of the crypto device */
116 133
117 int request_count; /* # current requests. */ 134 int request_count; /* # current requests. */
135};
118 136
119 struct ap_message reply; /* Per-device reply structure. */ 137struct zcrypt_queue {
120 int max_exp_bit_length; 138 struct list_head list; /* Device list. */
139 struct kref refcount; /* device refcounting */
140 struct zcrypt_card *zcard;
141 struct zcrypt_ops *ops; /* Crypto operations. */
142 struct ap_queue *queue; /* The "real" ap queue device. */
143 int online; /* User online/offline */
144
145 atomic_t load; /* Utilization of the crypto device */
121 146
122 debug_info_t *dbf_area; /* debugging */ 147 int request_count; /* # current requests. */
148
149 struct ap_message reply; /* Per-device reply structure. */
123}; 150};
124 151
125/* transport layer rescanning */ 152/* transport layer rescanning */
126extern atomic_t zcrypt_rescan_req; 153extern atomic_t zcrypt_rescan_req;
127 154
128struct zcrypt_device *zcrypt_device_alloc(size_t); 155extern spinlock_t zcrypt_list_lock;
129void zcrypt_device_free(struct zcrypt_device *); 156extern int zcrypt_device_count;
130void zcrypt_device_get(struct zcrypt_device *); 157extern struct list_head zcrypt_card_list;
131int zcrypt_device_put(struct zcrypt_device *); 158
132int zcrypt_device_register(struct zcrypt_device *); 159#define for_each_zcrypt_card(_zc) \
133void zcrypt_device_unregister(struct zcrypt_device *); 160 list_for_each_entry(_zc, &zcrypt_card_list, list)
161
162#define for_each_zcrypt_queue(_zq, _zc) \
163 list_for_each_entry(_zq, &(_zc)->zqueues, list)
164
165struct zcrypt_card *zcrypt_card_alloc(void);
166void zcrypt_card_free(struct zcrypt_card *);
167void zcrypt_card_get(struct zcrypt_card *);
168int zcrypt_card_put(struct zcrypt_card *);
169int zcrypt_card_register(struct zcrypt_card *);
170void zcrypt_card_unregister(struct zcrypt_card *);
171struct zcrypt_card *zcrypt_card_get_best(unsigned int *,
172 unsigned int, unsigned int);
173void zcrypt_card_put_best(struct zcrypt_card *, unsigned int);
174
175struct zcrypt_queue *zcrypt_queue_alloc(size_t);
176void zcrypt_queue_free(struct zcrypt_queue *);
177void zcrypt_queue_get(struct zcrypt_queue *);
178int zcrypt_queue_put(struct zcrypt_queue *);
179int zcrypt_queue_register(struct zcrypt_queue *);
180void zcrypt_queue_unregister(struct zcrypt_queue *);
181void zcrypt_queue_force_online(struct zcrypt_queue *, int);
182struct zcrypt_queue *zcrypt_queue_get_best(unsigned int, unsigned int);
183void zcrypt_queue_put_best(struct zcrypt_queue *, unsigned int);
184
185int zcrypt_rng_device_add(void);
186void zcrypt_rng_device_remove(void);
187
134void zcrypt_msgtype_register(struct zcrypt_ops *); 188void zcrypt_msgtype_register(struct zcrypt_ops *);
135void zcrypt_msgtype_unregister(struct zcrypt_ops *); 189void zcrypt_msgtype_unregister(struct zcrypt_ops *);
136struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *, int); 190struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
137void zcrypt_msgtype_release(struct zcrypt_ops *);
138int zcrypt_api_init(void); 191int zcrypt_api_init(void);
139void zcrypt_api_exit(void); 192void zcrypt_api_exit(void);
140 193
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
new file mode 100644
index 000000000000..53436ea52230
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -0,0 +1,187 @@
1/*
2 * zcrypt 2.1.0
3 *
4 * Copyright IBM Corp. 2001, 2012
5 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com)
7 * Cornelia Huck <cornelia.huck@de.ibm.com>
8 *
9 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
10 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
11 * Ralph Wuerthner <rwuerthn@de.ibm.com>
12 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 */
24
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/interrupt.h>
28#include <linux/miscdevice.h>
29#include <linux/fs.h>
30#include <linux/proc_fs.h>
31#include <linux/seq_file.h>
32#include <linux/compat.h>
33#include <linux/slab.h>
34#include <linux/atomic.h>
35#include <linux/uaccess.h>
36#include <linux/hw_random.h>
37#include <linux/debugfs.h>
38#include <asm/debug.h>
39
40#include "zcrypt_debug.h"
41#include "zcrypt_api.h"
42
43#include "zcrypt_msgtype6.h"
44#include "zcrypt_msgtype50.h"
45
46/*
47 * Device attributes common for all crypto card devices.
48 */
49
50static ssize_t zcrypt_card_type_show(struct device *dev,
51 struct device_attribute *attr, char *buf)
52{
53 struct zcrypt_card *zc = to_ap_card(dev)->private;
54
55 return snprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
56}
57
58static DEVICE_ATTR(type, 0444, zcrypt_card_type_show, NULL);
59
60static ssize_t zcrypt_card_online_show(struct device *dev,
61 struct device_attribute *attr,
62 char *buf)
63{
64 struct zcrypt_card *zc = to_ap_card(dev)->private;
65
66 return snprintf(buf, PAGE_SIZE, "%d\n", zc->online);
67}
68
69static ssize_t zcrypt_card_online_store(struct device *dev,
70 struct device_attribute *attr,
71 const char *buf, size_t count)
72{
73 struct zcrypt_card *zc = to_ap_card(dev)->private;
74 struct zcrypt_queue *zq;
75 int online, id;
76
77 if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
78 return -EINVAL;
79
80 zc->online = online;
81 id = zc->card->id;
82
83 ZCRYPT_DBF(DBF_INFO, "card=%02x online=%d\n", id, online);
84
85 spin_lock(&zcrypt_list_lock);
86 list_for_each_entry(zq, &zc->zqueues, list)
87 zcrypt_queue_force_online(zq, online);
88 spin_unlock(&zcrypt_list_lock);
89 return count;
90}
91
92static DEVICE_ATTR(online, 0644, zcrypt_card_online_show,
93 zcrypt_card_online_store);
94
95static struct attribute *zcrypt_card_attrs[] = {
96 &dev_attr_type.attr,
97 &dev_attr_online.attr,
98 NULL,
99};
100
101static struct attribute_group zcrypt_card_attr_group = {
102 .attrs = zcrypt_card_attrs,
103};
104
105struct zcrypt_card *zcrypt_card_alloc(void)
106{
107 struct zcrypt_card *zc;
108
109 zc = kzalloc(sizeof(struct zcrypt_card), GFP_KERNEL);
110 if (!zc)
111 return NULL;
112 INIT_LIST_HEAD(&zc->list);
113 INIT_LIST_HEAD(&zc->zqueues);
114 kref_init(&zc->refcount);
115 return zc;
116}
117EXPORT_SYMBOL(zcrypt_card_alloc);
118
119void zcrypt_card_free(struct zcrypt_card *zc)
120{
121 kfree(zc);
122}
123EXPORT_SYMBOL(zcrypt_card_free);
124
125static void zcrypt_card_release(struct kref *kref)
126{
127 struct zcrypt_card *zdev =
128 container_of(kref, struct zcrypt_card, refcount);
129 zcrypt_card_free(zdev);
130}
131
132void zcrypt_card_get(struct zcrypt_card *zc)
133{
134 kref_get(&zc->refcount);
135}
136EXPORT_SYMBOL(zcrypt_card_get);
137
138int zcrypt_card_put(struct zcrypt_card *zc)
139{
140 return kref_put(&zc->refcount, zcrypt_card_release);
141}
142EXPORT_SYMBOL(zcrypt_card_put);
143
144/**
145 * zcrypt_card_register() - Register a crypto card device.
146 * @zc: Pointer to a crypto card device
147 *
148 * Register a crypto card device. Returns 0 if successful.
149 */
150int zcrypt_card_register(struct zcrypt_card *zc)
151{
152 int rc;
153
154 rc = sysfs_create_group(&zc->card->ap_dev.device.kobj,
155 &zcrypt_card_attr_group);
156 if (rc)
157 return rc;
158
159 spin_lock(&zcrypt_list_lock);
160 list_add_tail(&zc->list, &zcrypt_card_list);
161 spin_unlock(&zcrypt_list_lock);
162
163 zc->online = 1;
164
165 ZCRYPT_DBF(DBF_INFO, "card=%02x register online=1\n", zc->card->id);
166
167 return rc;
168}
169EXPORT_SYMBOL(zcrypt_card_register);
170
171/**
172 * zcrypt_card_unregister(): Unregister a crypto card device.
173 * @zc: Pointer to crypto card device
174 *
175 * Unregister a crypto card device.
176 */
177void zcrypt_card_unregister(struct zcrypt_card *zc)
178{
179 ZCRYPT_DBF(DBF_INFO, "card=%02x unregister\n", zc->card->id);
180
181 spin_lock(&zcrypt_list_lock);
182 list_del_init(&zc->list);
183 spin_unlock(&zcrypt_list_lock);
184 sysfs_remove_group(&zc->card->ap_dev.device.kobj,
185 &zcrypt_card_attr_group);
186}
187EXPORT_SYMBOL(zcrypt_card_unregister);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 15104aaa075a..c7d48a18199e 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -31,6 +31,7 @@
31#include <linux/err.h> 31#include <linux/err.h>
32#include <linux/atomic.h> 32#include <linux/atomic.h>
33#include <asm/uaccess.h> 33#include <asm/uaccess.h>
34#include <linux/mod_devicetable.h>
34 35
35#include "ap_bus.h" 36#include "ap_bus.h"
36#include "zcrypt_api.h" 37#include "zcrypt_api.h"
@@ -43,9 +44,6 @@
43#define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE 44#define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE
44#define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */ 45#define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */
45 46
46#define CEX2A_SPEED_RATING 970
47#define CEX3A_SPEED_RATING 900 /* Fixme: Needs finetuning */
48
49#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */ 47#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */
50#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */ 48#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
51 49
@@ -57,107 +55,195 @@
57#define CEX2A_CLEANUP_TIME (15*HZ) 55#define CEX2A_CLEANUP_TIME (15*HZ)
58#define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME 56#define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME
59 57
60static struct ap_device_id zcrypt_cex2a_ids[] = {
61 { AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
62 { AP_DEVICE(AP_DEVICE_TYPE_CEX3A) },
63 { /* end of list */ },
64};
65
66MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids);
67MODULE_AUTHOR("IBM Corporation"); 58MODULE_AUTHOR("IBM Corporation");
68MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, " \ 59MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, " \
69 "Copyright IBM Corp. 2001, 2012"); 60 "Copyright IBM Corp. 2001, 2012");
70MODULE_LICENSE("GPL"); 61MODULE_LICENSE("GPL");
71 62
72static int zcrypt_cex2a_probe(struct ap_device *ap_dev); 63static struct ap_device_id zcrypt_cex2a_card_ids[] = {
73static void zcrypt_cex2a_remove(struct ap_device *ap_dev); 64 { .dev_type = AP_DEVICE_TYPE_CEX2A,
65 .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
66 { .dev_type = AP_DEVICE_TYPE_CEX3A,
67 .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
68 { /* end of list */ },
69};
70
71MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_card_ids);
74 72
75static struct ap_driver zcrypt_cex2a_driver = { 73static struct ap_device_id zcrypt_cex2a_queue_ids[] = {
76 .probe = zcrypt_cex2a_probe, 74 { .dev_type = AP_DEVICE_TYPE_CEX2A,
77 .remove = zcrypt_cex2a_remove, 75 .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
78 .ids = zcrypt_cex2a_ids, 76 { .dev_type = AP_DEVICE_TYPE_CEX3A,
79 .request_timeout = CEX2A_CLEANUP_TIME, 77 .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
78 { /* end of list */ },
80}; 79};
81 80
81MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_queue_ids);
82
82/** 83/**
83 * Probe function for CEX2A cards. It always accepts the AP device 84 * Probe function for CEX2A card devices. It always accepts the AP device
84 * since the bus_match already checked the hardware type. 85 * since the bus_match already checked the card type.
85 * @ap_dev: pointer to the AP device. 86 * @ap_dev: pointer to the AP device.
86 */ 87 */
87static int zcrypt_cex2a_probe(struct ap_device *ap_dev) 88static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
88{ 89{
89 struct zcrypt_device *zdev = NULL; 90 /*
91 * Normalized speed ratings per crypto adapter
92 * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
93 */
94 static const int CEX2A_SPEED_IDX[] = {
95 800, 1000, 2000, 900, 1200, 2400, 0, 0};
96 static const int CEX3A_SPEED_IDX[] = {
97 400, 500, 1000, 450, 550, 1200, 0, 0};
98
99 struct ap_card *ac = to_ap_card(&ap_dev->device);
100 struct zcrypt_card *zc;
90 int rc = 0; 101 int rc = 0;
91 102
103 zc = zcrypt_card_alloc();
104 if (!zc)
105 return -ENOMEM;
106 zc->card = ac;
107 ac->private = zc;
108
109 if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) {
110 zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
111 zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
112 memcpy(zc->speed_rating, CEX2A_SPEED_IDX,
113 sizeof(CEX2A_SPEED_IDX));
114 zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
115 zc->type_string = "CEX2A";
116 zc->user_space_type = ZCRYPT_CEX2A;
117 } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX3A) {
118 zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
119 zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
120 zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
121 if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
122 ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) {
123 zc->max_mod_size = CEX3A_MAX_MOD_SIZE;
124 zc->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
125 }
126 memcpy(zc->speed_rating, CEX3A_SPEED_IDX,
127 sizeof(CEX3A_SPEED_IDX));
128 zc->type_string = "CEX3A";
129 zc->user_space_type = ZCRYPT_CEX3A;
130 } else {
131 zcrypt_card_free(zc);
132 return -ENODEV;
133 }
134 zc->online = 1;
135
136 rc = zcrypt_card_register(zc);
137 if (rc) {
138 ac->private = NULL;
139 zcrypt_card_free(zc);
140 }
141
142 return rc;
143}
144
145/**
146 * This is called to remove the CEX2A card driver information
147 * if an AP card device is removed.
148 */
149static void zcrypt_cex2a_card_remove(struct ap_device *ap_dev)
150{
151 struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
152
153 if (zc)
154 zcrypt_card_unregister(zc);
155}
156
157static struct ap_driver zcrypt_cex2a_card_driver = {
158 .probe = zcrypt_cex2a_card_probe,
159 .remove = zcrypt_cex2a_card_remove,
160 .ids = zcrypt_cex2a_card_ids,
161};
162
163/**
164 * Probe function for CEX2A queue devices. It always accepts the AP device
165 * since the bus_match already checked the queue type.
166 * @ap_dev: pointer to the AP device.
167 */
168static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
169{
170 struct ap_queue *aq = to_ap_queue(&ap_dev->device);
171 struct zcrypt_queue *zq = NULL;
172 int rc;
173
92 switch (ap_dev->device_type) { 174 switch (ap_dev->device_type) {
93 case AP_DEVICE_TYPE_CEX2A: 175 case AP_DEVICE_TYPE_CEX2A:
94 zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE); 176 zq = zcrypt_queue_alloc(CEX2A_MAX_RESPONSE_SIZE);
95 if (!zdev) 177 if (!zq)
96 return -ENOMEM; 178 return -ENOMEM;
97 zdev->user_space_type = ZCRYPT_CEX2A;
98 zdev->type_string = "CEX2A";
99 zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
100 zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
101 zdev->short_crt = 1;
102 zdev->speed_rating = CEX2A_SPEED_RATING;
103 zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
104 break; 179 break;
105 case AP_DEVICE_TYPE_CEX3A: 180 case AP_DEVICE_TYPE_CEX3A:
106 zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE); 181 zq = zcrypt_queue_alloc(CEX3A_MAX_RESPONSE_SIZE);
107 if (!zdev) 182 if (!zq)
108 return -ENOMEM; 183 return -ENOMEM;
109 zdev->user_space_type = ZCRYPT_CEX3A;
110 zdev->type_string = "CEX3A";
111 zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
112 zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
113 zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
114 if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) &&
115 ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) {
116 zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;
117 zdev->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
118 }
119 zdev->short_crt = 1;
120 zdev->speed_rating = CEX3A_SPEED_RATING;
121 break; 184 break;
122 } 185 }
123 if (!zdev) 186 if (!zq)
124 return -ENODEV; 187 return -ENODEV;
125 zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME, 188 zq->ops = zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT);
126 MSGTYPE50_VARIANT_DEFAULT); 189 zq->queue = aq;
127 zdev->ap_dev = ap_dev; 190 zq->online = 1;
128 zdev->online = 1; 191 atomic_set(&zq->load, 0);
129 ap_device_init_reply(ap_dev, &zdev->reply); 192 ap_queue_init_reply(aq, &zq->reply);
130 ap_dev->private = zdev; 193 aq->request_timeout = CEX2A_CLEANUP_TIME,
131 rc = zcrypt_device_register(zdev); 194 aq->private = zq;
195 rc = zcrypt_queue_register(zq);
132 if (rc) { 196 if (rc) {
133 ap_dev->private = NULL; 197 aq->private = NULL;
134 zcrypt_msgtype_release(zdev->ops); 198 zcrypt_queue_free(zq);
135 zcrypt_device_free(zdev);
136 } 199 }
200
137 return rc; 201 return rc;
138} 202}
139 203
140/** 204/**
141 * This is called to remove the extended CEX2A driver information 205 * This is called to remove the CEX2A queue driver information
142 * if an AP device is removed. 206 * if an AP queue device is removed.
143 */ 207 */
144static void zcrypt_cex2a_remove(struct ap_device *ap_dev) 208static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
145{ 209{
146 struct zcrypt_device *zdev = ap_dev->private; 210 struct ap_queue *aq = to_ap_queue(&ap_dev->device);
147 struct zcrypt_ops *zops = zdev->ops; 211 struct zcrypt_queue *zq = aq->private;
148 212
149 zcrypt_device_unregister(zdev); 213 ap_queue_remove(aq);
150 zcrypt_msgtype_release(zops); 214 if (zq)
215 zcrypt_queue_unregister(zq);
151} 216}
152 217
218static struct ap_driver zcrypt_cex2a_queue_driver = {
219 .probe = zcrypt_cex2a_queue_probe,
220 .remove = zcrypt_cex2a_queue_remove,
221 .suspend = ap_queue_suspend,
222 .resume = ap_queue_resume,
223 .ids = zcrypt_cex2a_queue_ids,
224};
225
153int __init zcrypt_cex2a_init(void) 226int __init zcrypt_cex2a_init(void)
154{ 227{
155 return ap_driver_register(&zcrypt_cex2a_driver, THIS_MODULE, "cex2a"); 228 int rc;
229
230 rc = ap_driver_register(&zcrypt_cex2a_card_driver,
231 THIS_MODULE, "cex2acard");
232 if (rc)
233 return rc;
234
235 rc = ap_driver_register(&zcrypt_cex2a_queue_driver,
236 THIS_MODULE, "cex2aqueue");
237 if (rc)
238 ap_driver_unregister(&zcrypt_cex2a_card_driver);
239
240 return rc;
156} 241}
157 242
158void __exit zcrypt_cex2a_exit(void) 243void __exit zcrypt_cex2a_exit(void)
159{ 244{
160 ap_driver_unregister(&zcrypt_cex2a_driver); 245 ap_driver_unregister(&zcrypt_cex2a_queue_driver);
246 ap_driver_unregister(&zcrypt_cex2a_card_driver);
161} 247}
162 248
163module_init(zcrypt_cex2a_init); 249module_init(zcrypt_cex2a_init);
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index ccb2e78ebf0e..4e91163d70a6 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -9,6 +9,7 @@
9#include <linux/err.h> 9#include <linux/err.h>
10#include <linux/atomic.h> 10#include <linux/atomic.h>
11#include <linux/uaccess.h> 11#include <linux/uaccess.h>
12#include <linux/mod_devicetable.h>
12 13
13#include "ap_bus.h" 14#include "ap_bus.h"
14#include "zcrypt_api.h" 15#include "zcrypt_api.h"
@@ -24,13 +25,6 @@
24#define CEX4C_MIN_MOD_SIZE 16 /* 256 bits */ 25#define CEX4C_MIN_MOD_SIZE 16 /* 256 bits */
25#define CEX4C_MAX_MOD_SIZE 512 /* 4096 bits */ 26#define CEX4C_MAX_MOD_SIZE 512 /* 4096 bits */
26 27
27#define CEX4A_SPEED_RATING 900 /* TODO new card, new speed rating */
28#define CEX4C_SPEED_RATING 6500 /* TODO new card, new speed rating */
29#define CEX4P_SPEED_RATING 7000 /* TODO new card, new speed rating */
30#define CEX5A_SPEED_RATING 450 /* TODO new card, new speed rating */
31#define CEX5C_SPEED_RATING 3250 /* TODO new card, new speed rating */
32#define CEX5P_SPEED_RATING 3500 /* TODO new card, new speed rating */
33
34#define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE 28#define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE
35#define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE 29#define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE
36 30
@@ -41,147 +35,246 @@
41 */ 35 */
42#define CEX4_CLEANUP_TIME (900*HZ) 36#define CEX4_CLEANUP_TIME (900*HZ)
43 37
44static struct ap_device_id zcrypt_cex4_ids[] = {
45 { AP_DEVICE(AP_DEVICE_TYPE_CEX4) },
46 { AP_DEVICE(AP_DEVICE_TYPE_CEX5) },
47 { /* end of list */ },
48};
49
50MODULE_DEVICE_TABLE(ap, zcrypt_cex4_ids);
51MODULE_AUTHOR("IBM Corporation"); 38MODULE_AUTHOR("IBM Corporation");
52MODULE_DESCRIPTION("CEX4 Cryptographic Card device driver, " \ 39MODULE_DESCRIPTION("CEX4 Cryptographic Card device driver, " \
53 "Copyright IBM Corp. 2012"); 40 "Copyright IBM Corp. 2012");
54MODULE_LICENSE("GPL"); 41MODULE_LICENSE("GPL");
55 42
56static int zcrypt_cex4_probe(struct ap_device *ap_dev); 43static struct ap_device_id zcrypt_cex4_card_ids[] = {
57static void zcrypt_cex4_remove(struct ap_device *ap_dev); 44 { .dev_type = AP_DEVICE_TYPE_CEX4,
45 .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
46 { .dev_type = AP_DEVICE_TYPE_CEX5,
47 .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
48 { /* end of list */ },
49};
58 50
59static struct ap_driver zcrypt_cex4_driver = { 51MODULE_DEVICE_TABLE(ap, zcrypt_cex4_card_ids);
60 .probe = zcrypt_cex4_probe, 52
61 .remove = zcrypt_cex4_remove, 53static struct ap_device_id zcrypt_cex4_queue_ids[] = {
62 .ids = zcrypt_cex4_ids, 54 { .dev_type = AP_DEVICE_TYPE_CEX4,
63 .request_timeout = CEX4_CLEANUP_TIME, 55 .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
56 { .dev_type = AP_DEVICE_TYPE_CEX5,
57 .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
58 { /* end of list */ },
64}; 59};
65 60
61MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids);
62
66/** 63/**
67 * Probe function for CEX4 cards. It always accepts the AP device 64 * Probe function for CEX4 card device. It always accepts the AP device
68 * since the bus_match already checked the hardware type. 65 * since the bus_match already checked the hardware type.
69 * @ap_dev: pointer to the AP device. 66 * @ap_dev: pointer to the AP device.
70 */ 67 */
71static int zcrypt_cex4_probe(struct ap_device *ap_dev) 68static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
72{ 69{
73 struct zcrypt_device *zdev = NULL; 70 /*
71 * Normalized speed ratings per crypto adapter
72 * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
73 */
74 static const int CEX4A_SPEED_IDX[] = {
75 5, 6, 59, 20, 115, 581, 0, 0};
76 static const int CEX5A_SPEED_IDX[] = {
77 3, 3, 6, 8, 32, 218, 0, 0};
78 static const int CEX4C_SPEED_IDX[] = {
79 24, 25, 82, 41, 138, 1111, 79, 8};
80 static const int CEX5C_SPEED_IDX[] = {
81 10, 14, 23, 17, 45, 242, 63, 4};
82 static const int CEX4P_SPEED_IDX[] = {
83 142, 198, 1852, 203, 331, 1563, 0, 8};
84 static const int CEX5P_SPEED_IDX[] = {
85 49, 67, 131, 52, 85, 287, 0, 4};
86
87 struct ap_card *ac = to_ap_card(&ap_dev->device);
88 struct zcrypt_card *zc;
74 int rc = 0; 89 int rc = 0;
75 90
76 switch (ap_dev->device_type) { 91 zc = zcrypt_card_alloc();
77 case AP_DEVICE_TYPE_CEX4: 92 if (!zc)
78 case AP_DEVICE_TYPE_CEX5: 93 return -ENOMEM;
79 if (ap_test_bit(&ap_dev->functions, AP_FUNC_ACCEL)) { 94 zc->card = ac;
80 zdev = zcrypt_device_alloc(CEX4A_MAX_MESSAGE_SIZE); 95 ac->private = zc;
81 if (!zdev) 96 if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL)) {
82 return -ENOMEM; 97 if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
83 if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) { 98 zc->type_string = "CEX4A";
84 zdev->type_string = "CEX4A"; 99 zc->user_space_type = ZCRYPT_CEX4;
85 zdev->speed_rating = CEX4A_SPEED_RATING; 100 memcpy(zc->speed_rating, CEX4A_SPEED_IDX,
86 } else { 101 sizeof(CEX4A_SPEED_IDX));
87 zdev->type_string = "CEX5A"; 102 } else {
88 zdev->speed_rating = CEX5A_SPEED_RATING; 103 zc->type_string = "CEX5A";
89 } 104 zc->user_space_type = ZCRYPT_CEX5;
90 zdev->user_space_type = ZCRYPT_CEX3A; 105 memcpy(zc->speed_rating, CEX5A_SPEED_IDX,
91 zdev->min_mod_size = CEX4A_MIN_MOD_SIZE; 106 sizeof(CEX5A_SPEED_IDX));
92 if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) &&
93 ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) {
94 zdev->max_mod_size =
95 CEX4A_MAX_MOD_SIZE_4K;
96 zdev->max_exp_bit_length =
97 CEX4A_MAX_MOD_SIZE_4K;
98 } else {
99 zdev->max_mod_size =
100 CEX4A_MAX_MOD_SIZE_2K;
101 zdev->max_exp_bit_length =
102 CEX4A_MAX_MOD_SIZE_2K;
103 }
104 zdev->short_crt = 1;
105 zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME,
106 MSGTYPE50_VARIANT_DEFAULT);
107 } else if (ap_test_bit(&ap_dev->functions, AP_FUNC_COPRO)) {
108 zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE);
109 if (!zdev)
110 return -ENOMEM;
111 if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) {
112 zdev->type_string = "CEX4C";
113 zdev->speed_rating = CEX4C_SPEED_RATING;
114 } else {
115 zdev->type_string = "CEX5C";
116 zdev->speed_rating = CEX5C_SPEED_RATING;
117 }
118 zdev->user_space_type = ZCRYPT_CEX3C;
119 zdev->min_mod_size = CEX4C_MIN_MOD_SIZE;
120 zdev->max_mod_size = CEX4C_MAX_MOD_SIZE;
121 zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
122 zdev->short_crt = 0;
123 zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
124 MSGTYPE06_VARIANT_DEFAULT);
125 } else if (ap_test_bit(&ap_dev->functions, AP_FUNC_EP11)) {
126 zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE);
127 if (!zdev)
128 return -ENOMEM;
129 if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) {
130 zdev->type_string = "CEX4P";
131 zdev->speed_rating = CEX4P_SPEED_RATING;
132 } else {
133 zdev->type_string = "CEX5P";
134 zdev->speed_rating = CEX5P_SPEED_RATING;
135 }
136 zdev->user_space_type = ZCRYPT_CEX4;
137 zdev->min_mod_size = CEX4C_MIN_MOD_SIZE;
138 zdev->max_mod_size = CEX4C_MAX_MOD_SIZE;
139 zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
140 zdev->short_crt = 0;
141 zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
142 MSGTYPE06_VARIANT_EP11);
143 } 107 }
144 break; 108 zc->min_mod_size = CEX4A_MIN_MOD_SIZE;
145 } 109 if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
146 if (!zdev) 110 ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) {
111 zc->max_mod_size = CEX4A_MAX_MOD_SIZE_4K;
112 zc->max_exp_bit_length =
113 CEX4A_MAX_MOD_SIZE_4K;
114 } else {
115 zc->max_mod_size = CEX4A_MAX_MOD_SIZE_2K;
116 zc->max_exp_bit_length =
117 CEX4A_MAX_MOD_SIZE_2K;
118 }
119 } else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
120 if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
121 zc->type_string = "CEX4C";
122 /* wrong user space type, must be CEX4
123 * just keep it for cca compatibility
124 */
125 zc->user_space_type = ZCRYPT_CEX3C;
126 memcpy(zc->speed_rating, CEX4C_SPEED_IDX,
127 sizeof(CEX4C_SPEED_IDX));
128 } else {
129 zc->type_string = "CEX5C";
130 /* wrong user space type, must be CEX5
131 * just keep it for cca compatibility
132 */
133 zc->user_space_type = ZCRYPT_CEX3C;
134 memcpy(zc->speed_rating, CEX5C_SPEED_IDX,
135 sizeof(CEX5C_SPEED_IDX));
136 }
137 zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
138 zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
139 zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
140 } else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
141 if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
142 zc->type_string = "CEX4P";
143 zc->user_space_type = ZCRYPT_CEX4;
144 memcpy(zc->speed_rating, CEX4P_SPEED_IDX,
145 sizeof(CEX4P_SPEED_IDX));
146 } else {
147 zc->type_string = "CEX5P";
148 zc->user_space_type = ZCRYPT_CEX5;
149 memcpy(zc->speed_rating, CEX5P_SPEED_IDX,
150 sizeof(CEX5P_SPEED_IDX));
151 }
152 zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
153 zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
154 zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
155 } else {
156 zcrypt_card_free(zc);
147 return -ENODEV; 157 return -ENODEV;
148 zdev->ap_dev = ap_dev; 158 }
149 zdev->online = 1; 159 zc->online = 1;
150 ap_device_init_reply(ap_dev, &zdev->reply); 160
151 ap_dev->private = zdev; 161 rc = zcrypt_card_register(zc);
152 rc = zcrypt_device_register(zdev);
153 if (rc) { 162 if (rc) {
154 zcrypt_msgtype_release(zdev->ops); 163 ac->private = NULL;
155 ap_dev->private = NULL; 164 zcrypt_card_free(zc);
156 zcrypt_device_free(zdev);
157 } 165 }
166
158 return rc; 167 return rc;
159} 168}
160 169
161/** 170/**
162 * This is called to remove the extended CEX4 driver information 171 * This is called to remove the CEX4 card driver information
163 * if an AP device is removed. 172 * if an AP card device is removed.
173 */
174static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
175{
176 struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
177
178 if (zc)
179 zcrypt_card_unregister(zc);
180}
181
182static struct ap_driver zcrypt_cex4_card_driver = {
183 .probe = zcrypt_cex4_card_probe,
184 .remove = zcrypt_cex4_card_remove,
185 .ids = zcrypt_cex4_card_ids,
186};
187
188/**
189 * Probe function for CEX4 queue device. It always accepts the AP device
190 * since the bus_match already checked the hardware type.
191 * @ap_dev: pointer to the AP device.
164 */ 192 */
165static void zcrypt_cex4_remove(struct ap_device *ap_dev) 193static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
166{ 194{
167 struct zcrypt_device *zdev = ap_dev->private; 195 struct ap_queue *aq = to_ap_queue(&ap_dev->device);
168 struct zcrypt_ops *zops; 196 struct zcrypt_queue *zq;
197 int rc;
169 198
170 if (zdev) { 199 if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL)) {
171 zops = zdev->ops; 200 zq = zcrypt_queue_alloc(CEX4A_MAX_MESSAGE_SIZE);
172 zcrypt_device_unregister(zdev); 201 if (!zq)
173 zcrypt_msgtype_release(zops); 202 return -ENOMEM;
203 zq->ops = zcrypt_msgtype(MSGTYPE50_NAME,
204 MSGTYPE50_VARIANT_DEFAULT);
205 } else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
206 zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE);
207 if (!zq)
208 return -ENOMEM;
209 zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
210 MSGTYPE06_VARIANT_DEFAULT);
211 } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
212 zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE);
213 if (!zq)
214 return -ENOMEM;
215 zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
216 MSGTYPE06_VARIANT_EP11);
217 } else {
218 return -ENODEV;
174 } 219 }
220 zq->queue = aq;
221 zq->online = 1;
222 atomic_set(&zq->load, 0);
223 ap_queue_init_reply(aq, &zq->reply);
224 aq->request_timeout = CEX4_CLEANUP_TIME,
225 aq->private = zq;
226 rc = zcrypt_queue_register(zq);
227 if (rc) {
228 aq->private = NULL;
229 zcrypt_queue_free(zq);
230 }
231
232 return rc;
233}
234
235/**
236 * This is called to remove the CEX4 queue driver information
237 * if an AP queue device is removed.
238 */
239static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
240{
241 struct ap_queue *aq = to_ap_queue(&ap_dev->device);
242 struct zcrypt_queue *zq = aq->private;
243
244 ap_queue_remove(aq);
245 if (zq)
246 zcrypt_queue_unregister(zq);
175} 247}
176 248
249static struct ap_driver zcrypt_cex4_queue_driver = {
250 .probe = zcrypt_cex4_queue_probe,
251 .remove = zcrypt_cex4_queue_remove,
252 .suspend = ap_queue_suspend,
253 .resume = ap_queue_resume,
254 .ids = zcrypt_cex4_queue_ids,
255};
256
177int __init zcrypt_cex4_init(void) 257int __init zcrypt_cex4_init(void)
178{ 258{
179 return ap_driver_register(&zcrypt_cex4_driver, THIS_MODULE, "cex4"); 259 int rc;
260
261 rc = ap_driver_register(&zcrypt_cex4_card_driver,
262 THIS_MODULE, "cex4card");
263 if (rc)
264 return rc;
265
266 rc = ap_driver_register(&zcrypt_cex4_queue_driver,
267 THIS_MODULE, "cex4queue");
268 if (rc)
269 ap_driver_unregister(&zcrypt_cex4_card_driver);
270
271 return rc;
180} 272}
181 273
182void __exit zcrypt_cex4_exit(void) 274void __exit zcrypt_cex4_exit(void)
183{ 275{
184 ap_driver_unregister(&zcrypt_cex4_driver); 276 ap_driver_unregister(&zcrypt_cex4_queue_driver);
277 ap_driver_unregister(&zcrypt_cex4_card_driver);
185} 278}
186 279
187module_init(zcrypt_cex4_init); 280module_init(zcrypt_cex4_init);
diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
index 28d9349de1ad..13e38defb6b8 100644
--- a/drivers/s390/crypto/zcrypt_debug.h
+++ b/drivers/s390/crypto/zcrypt_debug.h
@@ -1,51 +1,27 @@
1/* 1/*
2 * Copyright IBM Corp. 2012 2 * Copyright IBM Corp. 2016
3 * Author(s): Holger Dengler (hd@linux.vnet.ibm.com) 3 * Author(s): Holger Dengler (hd@linux.vnet.ibm.com)
4 * Harald Freudenberger <freude@de.ibm.com>
4 */ 5 */
5#ifndef ZCRYPT_DEBUG_H 6#ifndef ZCRYPT_DEBUG_H
6#define ZCRYPT_DEBUG_H 7#define ZCRYPT_DEBUG_H
7 8
8#include <asm/debug.h> 9#include <asm/debug.h>
9#include "zcrypt_api.h"
10 10
11/* that gives us 15 characters in the text event views */ 11#define DBF_ERR 3 /* error conditions */
12#define ZCRYPT_DBF_LEN 16 12#define DBF_WARN 4 /* warning conditions */
13 13#define DBF_INFO 5 /* informational */
14#define DBF_ERR 3 /* error conditions */ 14#define DBF_DEBUG 6 /* for debugging only */
15#define DBF_WARN 4 /* warning conditions */
16#define DBF_INFO 6 /* informational */
17 15
16#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
18#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO) 17#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
19 18
20#define ZCRYPT_DBF_COMMON(level, text...) \ 19#define DBF_MAX_SPRINTF_ARGS 5
21 do { \ 20
22 if (debug_level_enabled(zcrypt_dbf_common, level)) { \ 21#define ZCRYPT_DBF(...) \
23 char debug_buffer[ZCRYPT_DBF_LEN]; \ 22 debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__)
24 snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \ 23
25 debug_text_event(zcrypt_dbf_common, level, \ 24extern debug_info_t *zcrypt_dbf_info;
26 debug_buffer); \
27 } \
28 } while (0)
29
30#define ZCRYPT_DBF_DEVICES(level, text...) \
31 do { \
32 if (debug_level_enabled(zcrypt_dbf_devices, level)) { \
33 char debug_buffer[ZCRYPT_DBF_LEN]; \
34 snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
35 debug_text_event(zcrypt_dbf_devices, level, \
36 debug_buffer); \
37 } \
38 } while (0)
39
40#define ZCRYPT_DBF_DEV(level, device, text...) \
41 do { \
42 if (debug_level_enabled(device->dbf_area, level)) { \
43 char debug_buffer[ZCRYPT_DBF_LEN]; \
44 snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
45 debug_text_event(device->dbf_area, level, \
46 debug_buffer); \
47 } \
48 } while (0)
49 25
50int zcrypt_debug_init(void); 26int zcrypt_debug_init(void);
51void zcrypt_debug_exit(void); 27void zcrypt_debug_exit(void);
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index de1b6c1d172c..13df60209ed3 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -55,52 +55,61 @@ struct error_hdr {
55#define TYPE82_RSP_CODE 0x82 55#define TYPE82_RSP_CODE 0x82
56#define TYPE88_RSP_CODE 0x88 56#define TYPE88_RSP_CODE 0x88
57 57
58#define REP82_ERROR_MACHINE_FAILURE 0x10 58#define REP82_ERROR_MACHINE_FAILURE 0x10
59#define REP82_ERROR_PREEMPT_FAILURE 0x12 59#define REP82_ERROR_PREEMPT_FAILURE 0x12
60#define REP82_ERROR_CHECKPT_FAILURE 0x14 60#define REP82_ERROR_CHECKPT_FAILURE 0x14
61#define REP82_ERROR_MESSAGE_TYPE 0x20 61#define REP82_ERROR_MESSAGE_TYPE 0x20
62#define REP82_ERROR_INVALID_COMM_CD 0x21 /* Type 84 */ 62#define REP82_ERROR_INVALID_COMM_CD 0x21 /* Type 84 */
63#define REP82_ERROR_INVALID_MSG_LEN 0x23 63#define REP82_ERROR_INVALID_MSG_LEN 0x23
64#define REP82_ERROR_RESERVD_FIELD 0x24 /* was 0x50 */ 64#define REP82_ERROR_RESERVD_FIELD 0x24 /* was 0x50 */
65#define REP82_ERROR_FORMAT_FIELD 0x29 65#define REP82_ERROR_FORMAT_FIELD 0x29
66#define REP82_ERROR_INVALID_COMMAND 0x30 66#define REP82_ERROR_INVALID_COMMAND 0x30
67#define REP82_ERROR_MALFORMED_MSG 0x40 67#define REP82_ERROR_MALFORMED_MSG 0x40
68#define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */ 68#define REP82_ERROR_INVALID_DOMAIN_PRECHECK 0x42
69#define REP82_ERROR_WORD_ALIGNMENT 0x60 69#define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */
70#define REP82_ERROR_MESSAGE_LENGTH 0x80 70#define REP82_ERROR_WORD_ALIGNMENT 0x60
71#define REP82_ERROR_OPERAND_INVALID 0x82 71#define REP82_ERROR_MESSAGE_LENGTH 0x80
72#define REP82_ERROR_OPERAND_SIZE 0x84 72#define REP82_ERROR_OPERAND_INVALID 0x82
73#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85 73#define REP82_ERROR_OPERAND_SIZE 0x84
74#define REP82_ERROR_RESERVED_FIELD 0x88 74#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
75#define REP82_ERROR_TRANSPORT_FAIL 0x90 75#define REP82_ERROR_RESERVED_FIELD 0x88
76#define REP82_ERROR_PACKET_TRUNCATED 0xA0 76#define REP82_ERROR_INVALID_DOMAIN_PENDING 0x8A
77#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0 77#define REP82_ERROR_TRANSPORT_FAIL 0x90
78#define REP82_ERROR_PACKET_TRUNCATED 0xA0
79#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
78 80
79#define REP88_ERROR_MODULE_FAILURE 0x10 81#define REP88_ERROR_MODULE_FAILURE 0x10
80 82
81#define REP88_ERROR_MESSAGE_TYPE 0x20 83#define REP88_ERROR_MESSAGE_TYPE 0x20
82#define REP88_ERROR_MESSAGE_MALFORMD 0x22 84#define REP88_ERROR_MESSAGE_MALFORMD 0x22
83#define REP88_ERROR_MESSAGE_LENGTH 0x23 85#define REP88_ERROR_MESSAGE_LENGTH 0x23
84#define REP88_ERROR_RESERVED_FIELD 0x24 86#define REP88_ERROR_RESERVED_FIELD 0x24
85#define REP88_ERROR_KEY_TYPE 0x34 87#define REP88_ERROR_KEY_TYPE 0x34
86#define REP88_ERROR_INVALID_KEY 0x82 /* CEX2A */ 88#define REP88_ERROR_INVALID_KEY 0x82 /* CEX2A */
87#define REP88_ERROR_OPERAND 0x84 /* CEX2A */ 89#define REP88_ERROR_OPERAND 0x84 /* CEX2A */
88#define REP88_ERROR_OPERAND_EVEN_MOD 0x85 /* CEX2A */ 90#define REP88_ERROR_OPERAND_EVEN_MOD 0x85 /* CEX2A */
89 91
90static inline int convert_error(struct zcrypt_device *zdev, 92static inline int convert_error(struct zcrypt_queue *zq,
91 struct ap_message *reply) 93 struct ap_message *reply)
92{ 94{
93 struct error_hdr *ehdr = reply->message; 95 struct error_hdr *ehdr = reply->message;
96 int card = AP_QID_CARD(zq->queue->qid);
97 int queue = AP_QID_QUEUE(zq->queue->qid);
94 98
95 switch (ehdr->reply_code) { 99 switch (ehdr->reply_code) {
96 case REP82_ERROR_OPERAND_INVALID: 100 case REP82_ERROR_OPERAND_INVALID:
97 case REP82_ERROR_OPERAND_SIZE: 101 case REP82_ERROR_OPERAND_SIZE:
98 case REP82_ERROR_EVEN_MOD_IN_OPND: 102 case REP82_ERROR_EVEN_MOD_IN_OPND:
99 case REP88_ERROR_MESSAGE_MALFORMD: 103 case REP88_ERROR_MESSAGE_MALFORMD:
104 case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
105 case REP82_ERROR_INVALID_DOMAIN_PENDING:
100 // REP88_ERROR_INVALID_KEY // '82' CEX2A 106 // REP88_ERROR_INVALID_KEY // '82' CEX2A
101 // REP88_ERROR_OPERAND // '84' CEX2A 107 // REP88_ERROR_OPERAND // '84' CEX2A
102 // REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A 108 // REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
103 /* Invalid input data. */ 109 /* Invalid input data. */
110 ZCRYPT_DBF(DBF_WARN,
111 "device=%02x.%04x reply=0x%02x => rc=EINVAL\n",
112 card, queue, ehdr->reply_code);
104 return -EINVAL; 113 return -EINVAL;
105 case REP82_ERROR_MESSAGE_TYPE: 114 case REP82_ERROR_MESSAGE_TYPE:
106 // REP88_ERROR_MESSAGE_TYPE // '20' CEX2A 115 // REP88_ERROR_MESSAGE_TYPE // '20' CEX2A
@@ -110,32 +119,32 @@ static inline int convert_error(struct zcrypt_device *zdev,
110 * and then repeat the request. 119 * and then repeat the request.
111 */ 120 */
112 atomic_set(&zcrypt_rescan_req, 1); 121 atomic_set(&zcrypt_rescan_req, 1);
113 zdev->online = 0; 122 zq->online = 0;
114 pr_err("Cryptographic device %x failed and was set offline\n", 123 pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
115 AP_QID_DEVICE(zdev->ap_dev->qid)); 124 card, queue);
116 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 125 ZCRYPT_DBF(DBF_ERR,
117 AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, 126 "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
118 ehdr->reply_code); 127 card, queue, ehdr->reply_code);
119 return -EAGAIN; 128 return -EAGAIN;
120 case REP82_ERROR_TRANSPORT_FAIL: 129 case REP82_ERROR_TRANSPORT_FAIL:
121 case REP82_ERROR_MACHINE_FAILURE: 130 case REP82_ERROR_MACHINE_FAILURE:
122 // REP88_ERROR_MODULE_FAILURE // '10' CEX2A 131 // REP88_ERROR_MODULE_FAILURE // '10' CEX2A
123 /* If a card fails disable it and repeat the request. */ 132 /* If a card fails disable it and repeat the request. */
124 atomic_set(&zcrypt_rescan_req, 1); 133 atomic_set(&zcrypt_rescan_req, 1);
125 zdev->online = 0; 134 zq->online = 0;
126 pr_err("Cryptographic device %x failed and was set offline\n", 135 pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
127 AP_QID_DEVICE(zdev->ap_dev->qid)); 136 card, queue);
128 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 137 ZCRYPT_DBF(DBF_ERR,
129 AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, 138 "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
130 ehdr->reply_code); 139 card, queue, ehdr->reply_code);
131 return -EAGAIN; 140 return -EAGAIN;
132 default: 141 default:
133 zdev->online = 0; 142 zq->online = 0;
134 pr_err("Cryptographic device %x failed and was set offline\n", 143 pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
135 AP_QID_DEVICE(zdev->ap_dev->qid)); 144 card, queue);
136 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 145 ZCRYPT_DBF(DBF_ERR,
137 AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, 146 "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
138 ehdr->reply_code); 147 card, queue, ehdr->reply_code);
139 return -EAGAIN; /* repeat the request on a different device. */ 148 return -EAGAIN; /* repeat the request on a different device. */
140 } 149 }
141} 150}
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index eedfaa2cf715..6dd5d7c58dd0 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -53,9 +53,6 @@ MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \
53 "Copyright IBM Corp. 2001, 2012"); 53 "Copyright IBM Corp. 2001, 2012");
54MODULE_LICENSE("GPL"); 54MODULE_LICENSE("GPL");
55 55
56static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *,
57 struct ap_message *);
58
59/** 56/**
60 * The type 50 message family is associated with a CEX2A card. 57 * The type 50 message family is associated with a CEX2A card.
61 * 58 *
@@ -173,16 +170,48 @@ struct type80_hdr {
173 unsigned char reserved3[8]; 170 unsigned char reserved3[8];
174} __packed; 171} __packed;
175 172
173unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fcode)
174{
175
176 if (!mex->inputdatalength)
177 return -EINVAL;
178
179 if (mex->inputdatalength <= 128) /* 1024 bit */
180 *fcode = MEX_1K;
181 else if (mex->inputdatalength <= 256) /* 2048 bit */
182 *fcode = MEX_2K;
183 else /* 4096 bit */
184 *fcode = MEX_4K;
185
186 return 0;
187}
188
189unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode)
190{
191
192 if (!crt->inputdatalength)
193 return -EINVAL;
194
195 if (crt->inputdatalength <= 128) /* 1024 bit */
196 *fcode = CRT_1K;
197 else if (crt->inputdatalength <= 256) /* 2048 bit */
198 *fcode = CRT_2K;
199 else /* 4096 bit */
200 *fcode = CRT_4K;
201
202 return 0;
203}
204
176/** 205/**
177 * Convert a ICAMEX message to a type50 MEX message. 206 * Convert a ICAMEX message to a type50 MEX message.
178 * 207 *
179 * @zdev: crypto device pointer 208 * @zq: crypto queue pointer
180 * @zreq: crypto request pointer 209 * @ap_msg: crypto request pointer
181 * @mex: pointer to user input data 210 * @mex: pointer to user input data
182 * 211 *
183 * Returns 0 on success or -EFAULT. 212 * Returns 0 on success or -EFAULT.
184 */ 213 */
185static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev, 214static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
186 struct ap_message *ap_msg, 215 struct ap_message *ap_msg,
187 struct ica_rsa_modexpo *mex) 216 struct ica_rsa_modexpo *mex)
188{ 217{
@@ -234,13 +263,13 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
234/** 263/**
235 * Convert a ICACRT message to a type50 CRT message. 264 * Convert a ICACRT message to a type50 CRT message.
236 * 265 *
237 * @zdev: crypto device pointer 266 * @zq: crypto queue pointer
238 * @zreq: crypto request pointer 267 * @ap_msg: crypto request pointer
239 * @crt: pointer to user input data 268 * @crt: pointer to user input data
240 * 269 *
241 * Returns 0 on success or -EFAULT. 270 * Returns 0 on success or -EFAULT.
242 */ 271 */
243static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev, 272static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
244 struct ap_message *ap_msg, 273 struct ap_message *ap_msg,
245 struct ica_rsa_modexpo_crt *crt) 274 struct ica_rsa_modexpo_crt *crt)
246{ 275{
@@ -283,7 +312,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
283 u = crb2->u + sizeof(crb2->u) - short_len; 312 u = crb2->u + sizeof(crb2->u) - short_len;
284 inp = crb2->message + sizeof(crb2->message) - mod_len; 313 inp = crb2->message + sizeof(crb2->message) - mod_len;
285 } else if ((mod_len <= 512) && /* up to 4096 bit key size */ 314 } else if ((mod_len <= 512) && /* up to 4096 bit key size */
286 (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)) { /* >= CEX3A */ 315 (zq->zcard->max_mod_size == CEX3A_MAX_MOD_SIZE)) {
287 struct type50_crb3_msg *crb3 = ap_msg->message; 316 struct type50_crb3_msg *crb3 = ap_msg->message;
288 memset(crb3, 0, sizeof(*crb3)); 317 memset(crb3, 0, sizeof(*crb3));
289 ap_msg->length = sizeof(*crb3); 318 ap_msg->length = sizeof(*crb3);
@@ -317,14 +346,14 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
317/** 346/**
318 * Copy results from a type 80 reply message back to user space. 347 * Copy results from a type 80 reply message back to user space.
319 * 348 *
320 * @zdev: crypto device pointer 349 * @zq: crypto device pointer
321 * @reply: reply AP message. 350 * @reply: reply AP message.
322 * @data: pointer to user output data 351 * @data: pointer to user output data
323 * @length: size of user output data 352 * @length: size of user output data
324 * 353 *
325 * Returns 0 on success or -EFAULT. 354 * Returns 0 on success or -EFAULT.
326 */ 355 */
327static int convert_type80(struct zcrypt_device *zdev, 356static int convert_type80(struct zcrypt_queue *zq,
328 struct ap_message *reply, 357 struct ap_message *reply,
329 char __user *outputdata, 358 char __user *outputdata,
330 unsigned int outputdatalength) 359 unsigned int outputdatalength)
@@ -334,16 +363,18 @@ static int convert_type80(struct zcrypt_device *zdev,
334 363
335 if (t80h->len < sizeof(*t80h) + outputdatalength) { 364 if (t80h->len < sizeof(*t80h) + outputdatalength) {
336 /* The result is too short, the CEX2A card may not do that.. */ 365 /* The result is too short, the CEX2A card may not do that.. */
337 zdev->online = 0; 366 zq->online = 0;
338 pr_err("Cryptographic device %x failed and was set offline\n", 367 pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
339 AP_QID_DEVICE(zdev->ap_dev->qid)); 368 AP_QID_CARD(zq->queue->qid),
340 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 369 AP_QID_QUEUE(zq->queue->qid));
341 AP_QID_DEVICE(zdev->ap_dev->qid), 370 ZCRYPT_DBF(DBF_ERR,
342 zdev->online, t80h->code); 371 "device=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
343 372 AP_QID_CARD(zq->queue->qid),
373 AP_QID_QUEUE(zq->queue->qid),
374 t80h->code);
344 return -EAGAIN; /* repeat the request on a different device. */ 375 return -EAGAIN; /* repeat the request on a different device. */
345 } 376 }
346 if (zdev->user_space_type == ZCRYPT_CEX2A) 377 if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
347 BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE); 378 BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
348 else 379 else
349 BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE); 380 BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
@@ -353,25 +384,31 @@ static int convert_type80(struct zcrypt_device *zdev,
353 return 0; 384 return 0;
354} 385}
355 386
356static int convert_response(struct zcrypt_device *zdev, 387static int convert_response(struct zcrypt_queue *zq,
357 struct ap_message *reply, 388 struct ap_message *reply,
358 char __user *outputdata, 389 char __user *outputdata,
359 unsigned int outputdatalength) 390 unsigned int outputdatalength)
360{ 391{
361 /* Response type byte is the second byte in the response. */ 392 /* Response type byte is the second byte in the response. */
362 switch (((unsigned char *) reply->message)[1]) { 393 unsigned char rtype = ((unsigned char *) reply->message)[1];
394
395 switch (rtype) {
363 case TYPE82_RSP_CODE: 396 case TYPE82_RSP_CODE:
364 case TYPE88_RSP_CODE: 397 case TYPE88_RSP_CODE:
365 return convert_error(zdev, reply); 398 return convert_error(zq, reply);
366 case TYPE80_RSP_CODE: 399 case TYPE80_RSP_CODE:
367 return convert_type80(zdev, reply, 400 return convert_type80(zq, reply,
368 outputdata, outputdatalength); 401 outputdata, outputdatalength);
369 default: /* Unknown response type, this should NEVER EVER happen */ 402 default: /* Unknown response type, this should NEVER EVER happen */
370 zdev->online = 0; 403 zq->online = 0;
371 pr_err("Cryptographic device %x failed and was set offline\n", 404 pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
372 AP_QID_DEVICE(zdev->ap_dev->qid)); 405 AP_QID_CARD(zq->queue->qid),
373 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", 406 AP_QID_QUEUE(zq->queue->qid));
374 AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); 407 ZCRYPT_DBF(DBF_ERR,
408 "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
409 AP_QID_CARD(zq->queue->qid),
410 AP_QID_QUEUE(zq->queue->qid),
411 (unsigned int) rtype);
375 return -EAGAIN; /* repeat the request on a different device. */ 412 return -EAGAIN; /* repeat the request on a different device. */
376 } 413 }
377} 414}
@@ -380,11 +417,11 @@ static int convert_response(struct zcrypt_device *zdev,
380 * This function is called from the AP bus code after a crypto request 417 * This function is called from the AP bus code after a crypto request
381 * "msg" has finished with the reply message "reply". 418 * "msg" has finished with the reply message "reply".
382 * It is called from tasklet context. 419 * It is called from tasklet context.
383 * @ap_dev: pointer to the AP device 420 * @aq: pointer to the AP device
384 * @msg: pointer to the AP message 421 * @msg: pointer to the AP message
385 * @reply: pointer to the AP reply message 422 * @reply: pointer to the AP reply message
386 */ 423 */
387static void zcrypt_cex2a_receive(struct ap_device *ap_dev, 424static void zcrypt_cex2a_receive(struct ap_queue *aq,
388 struct ap_message *msg, 425 struct ap_message *msg,
389 struct ap_message *reply) 426 struct ap_message *reply)
390{ 427{
@@ -400,7 +437,7 @@ static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
400 goto out; /* ap_msg->rc indicates the error */ 437 goto out; /* ap_msg->rc indicates the error */
401 t80h = reply->message; 438 t80h = reply->message;
402 if (t80h->type == TYPE80_RSP_CODE) { 439 if (t80h->type == TYPE80_RSP_CODE) {
403 if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A) 440 if (aq->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A)
404 length = min_t(int, 441 length = min_t(int,
405 CEX2A_MAX_RESPONSE_SIZE, t80h->len); 442 CEX2A_MAX_RESPONSE_SIZE, t80h->len);
406 else 443 else
@@ -418,11 +455,11 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0);
418/** 455/**
419 * The request distributor calls this function if it picked the CEX2A 456 * The request distributor calls this function if it picked the CEX2A
420 * device to handle a modexpo request. 457 * device to handle a modexpo request.
421 * @zdev: pointer to zcrypt_device structure that identifies the 458 * @zq: pointer to zcrypt_queue structure that identifies the
422 * CEX2A device to the request distributor 459 * CEX2A device to the request distributor
423 * @mex: pointer to the modexpo request buffer 460 * @mex: pointer to the modexpo request buffer
424 */ 461 */
425static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev, 462static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
426 struct ica_rsa_modexpo *mex) 463 struct ica_rsa_modexpo *mex)
427{ 464{
428 struct ap_message ap_msg; 465 struct ap_message ap_msg;
@@ -430,7 +467,7 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
430 int rc; 467 int rc;
431 468
432 ap_init_message(&ap_msg); 469 ap_init_message(&ap_msg);
433 if (zdev->user_space_type == ZCRYPT_CEX2A) 470 if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
434 ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, 471 ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
435 GFP_KERNEL); 472 GFP_KERNEL);
436 else 473 else
@@ -442,20 +479,20 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
442 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 479 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
443 atomic_inc_return(&zcrypt_step); 480 atomic_inc_return(&zcrypt_step);
444 ap_msg.private = &work; 481 ap_msg.private = &work;
445 rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex); 482 rc = ICAMEX_msg_to_type50MEX_msg(zq, &ap_msg, mex);
446 if (rc) 483 if (rc)
447 goto out_free; 484 goto out_free;
448 init_completion(&work); 485 init_completion(&work);
449 ap_queue_message(zdev->ap_dev, &ap_msg); 486 ap_queue_message(zq->queue, &ap_msg);
450 rc = wait_for_completion_interruptible(&work); 487 rc = wait_for_completion_interruptible(&work);
451 if (rc == 0) { 488 if (rc == 0) {
452 rc = ap_msg.rc; 489 rc = ap_msg.rc;
453 if (rc == 0) 490 if (rc == 0)
454 rc = convert_response(zdev, &ap_msg, mex->outputdata, 491 rc = convert_response(zq, &ap_msg, mex->outputdata,
455 mex->outputdatalength); 492 mex->outputdatalength);
456 } else 493 } else
457 /* Signal pending. */ 494 /* Signal pending. */
458 ap_cancel_message(zdev->ap_dev, &ap_msg); 495 ap_cancel_message(zq->queue, &ap_msg);
459out_free: 496out_free:
460 kfree(ap_msg.message); 497 kfree(ap_msg.message);
461 return rc; 498 return rc;
@@ -464,11 +501,11 @@ out_free:
464/** 501/**
465 * The request distributor calls this function if it picked the CEX2A 502 * The request distributor calls this function if it picked the CEX2A
466 * device to handle a modexpo_crt request. 503 * device to handle a modexpo_crt request.
467 * @zdev: pointer to zcrypt_device structure that identifies the 504 * @zq: pointer to zcrypt_queue structure that identifies the
468 * CEX2A device to the request distributor 505 * CEX2A device to the request distributor
469 * @crt: pointer to the modexpoc_crt request buffer 506 * @crt: pointer to the modexpoc_crt request buffer
470 */ 507 */
471static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev, 508static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
472 struct ica_rsa_modexpo_crt *crt) 509 struct ica_rsa_modexpo_crt *crt)
473{ 510{
474 struct ap_message ap_msg; 511 struct ap_message ap_msg;
@@ -476,7 +513,7 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
476 int rc; 513 int rc;
477 514
478 ap_init_message(&ap_msg); 515 ap_init_message(&ap_msg);
479 if (zdev->user_space_type == ZCRYPT_CEX2A) 516 if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
480 ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, 517 ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
481 GFP_KERNEL); 518 GFP_KERNEL);
482 else 519 else
@@ -488,20 +525,20 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
488 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 525 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
489 atomic_inc_return(&zcrypt_step); 526 atomic_inc_return(&zcrypt_step);
490 ap_msg.private = &work; 527 ap_msg.private = &work;
491 rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt); 528 rc = ICACRT_msg_to_type50CRT_msg(zq, &ap_msg, crt);
492 if (rc) 529 if (rc)
493 goto out_free; 530 goto out_free;
494 init_completion(&work); 531 init_completion(&work);
495 ap_queue_message(zdev->ap_dev, &ap_msg); 532 ap_queue_message(zq->queue, &ap_msg);
496 rc = wait_for_completion_interruptible(&work); 533 rc = wait_for_completion_interruptible(&work);
497 if (rc == 0) { 534 if (rc == 0) {
498 rc = ap_msg.rc; 535 rc = ap_msg.rc;
499 if (rc == 0) 536 if (rc == 0)
500 rc = convert_response(zdev, &ap_msg, crt->outputdata, 537 rc = convert_response(zq, &ap_msg, crt->outputdata,
501 crt->outputdatalength); 538 crt->outputdatalength);
502 } else 539 } else
503 /* Signal pending. */ 540 /* Signal pending. */
504 ap_cancel_message(zdev->ap_dev, &ap_msg); 541 ap_cancel_message(zq->queue, &ap_msg);
505out_free: 542out_free:
506 kfree(ap_msg.message); 543 kfree(ap_msg.message);
507 return rc; 544 return rc;
@@ -518,16 +555,12 @@ static struct zcrypt_ops zcrypt_msgtype50_ops = {
518 .variant = MSGTYPE50_VARIANT_DEFAULT, 555 .variant = MSGTYPE50_VARIANT_DEFAULT,
519}; 556};
520 557
521int __init zcrypt_msgtype50_init(void) 558void __init zcrypt_msgtype50_init(void)
522{ 559{
523 zcrypt_msgtype_register(&zcrypt_msgtype50_ops); 560 zcrypt_msgtype_register(&zcrypt_msgtype50_ops);
524 return 0;
525} 561}
526 562
527void __exit zcrypt_msgtype50_exit(void) 563void __exit zcrypt_msgtype50_exit(void)
528{ 564{
529 zcrypt_msgtype_unregister(&zcrypt_msgtype50_ops); 565 zcrypt_msgtype_unregister(&zcrypt_msgtype50_ops);
530} 566}
531
532module_init(zcrypt_msgtype50_init);
533module_exit(zcrypt_msgtype50_exit);
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.h b/drivers/s390/crypto/zcrypt_msgtype50.h
index 0a66e4aeeb50..5cc280318ee7 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.h
+++ b/drivers/s390/crypto/zcrypt_msgtype50.h
@@ -35,7 +35,10 @@
35 35
36#define MSGTYPE_ADJUSTMENT 0x08 /*type04 extension (not needed in type50)*/ 36#define MSGTYPE_ADJUSTMENT 0x08 /*type04 extension (not needed in type50)*/
37 37
38int zcrypt_msgtype50_init(void); 38unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *, int *);
39unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *, int *);
40
41void zcrypt_msgtype50_init(void);
39void zcrypt_msgtype50_exit(void); 42void zcrypt_msgtype50_exit(void);
40 43
41#endif /* _ZCRYPT_MSGTYPE50_H_ */ 44#endif /* _ZCRYPT_MSGTYPE50_H_ */
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 21959719daef..e5563ffeb839 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -60,9 +60,6 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \
60 "Copyright IBM Corp. 2001, 2012"); 60 "Copyright IBM Corp. 2001, 2012");
61MODULE_LICENSE("GPL"); 61MODULE_LICENSE("GPL");
62 62
63static void zcrypt_msgtype6_receive(struct ap_device *, struct ap_message *,
64 struct ap_message *);
65
66/** 63/**
67 * CPRB 64 * CPRB
68 * Note that all shorts, ints and longs are little-endian. 65 * Note that all shorts, ints and longs are little-endian.
@@ -149,16 +146,122 @@ static struct CPRBX static_cprbx = {
149 .func_id = {0x54, 0x32}, 146 .func_id = {0x54, 0x32},
150}; 147};
151 148
149int speed_idx_cca(int req_type)
150{
151 switch (req_type) {
152 case 0x4142:
153 case 0x4149:
154 case 0x414D:
155 case 0x4341:
156 case 0x4344:
157 case 0x4354:
158 case 0x4358:
159 case 0x444B:
160 case 0x4558:
161 case 0x4643:
162 case 0x4651:
163 case 0x4C47:
164 case 0x4C4B:
165 case 0x4C51:
166 case 0x4F48:
167 case 0x504F:
168 case 0x5053:
169 case 0x5058:
170 case 0x5343:
171 case 0x5344:
172 case 0x5345:
173 case 0x5350:
174 return LOW;
175 case 0x414B:
176 case 0x4345:
177 case 0x4349:
178 case 0x434D:
179 case 0x4847:
180 case 0x4849:
181 case 0x484D:
182 case 0x4850:
183 case 0x4851:
184 case 0x4954:
185 case 0x4958:
186 case 0x4B43:
187 case 0x4B44:
188 case 0x4B45:
189 case 0x4B47:
190 case 0x4B48:
191 case 0x4B49:
192 case 0x4B4E:
193 case 0x4B50:
194 case 0x4B52:
195 case 0x4B54:
196 case 0x4B58:
197 case 0x4D50:
198 case 0x4D53:
199 case 0x4D56:
200 case 0x4D58:
201 case 0x5044:
202 case 0x5045:
203 case 0x5046:
204 case 0x5047:
205 case 0x5049:
206 case 0x504B:
207 case 0x504D:
208 case 0x5254:
209 case 0x5347:
210 case 0x5349:
211 case 0x534B:
212 case 0x534D:
213 case 0x5356:
214 case 0x5358:
215 case 0x5443:
216 case 0x544B:
217 case 0x5647:
218 return HIGH;
219 default:
220 return MEDIUM;
221 }
222}
223
224int speed_idx_ep11(int req_type)
225{
226 switch (req_type) {
227 case 1:
228 case 2:
229 case 36:
230 case 37:
231 case 38:
232 case 39:
233 case 40:
234 return LOW;
235 case 17:
236 case 18:
237 case 19:
238 case 20:
239 case 21:
240 case 22:
241 case 26:
242 case 30:
243 case 31:
244 case 32:
245 case 33:
246 case 34:
247 case 35:
248 return HIGH;
249 default:
250 return MEDIUM;
251 }
252}
253
254
152/** 255/**
153 * Convert a ICAMEX message to a type6 MEX message. 256 * Convert a ICAMEX message to a type6 MEX message.
154 * 257 *
155 * @zdev: crypto device pointer 258 * @zq: crypto device pointer
156 * @ap_msg: pointer to AP message 259 * @ap_msg: pointer to AP message
157 * @mex: pointer to user input data 260 * @mex: pointer to user input data
158 * 261 *
159 * Returns 0 on success or -EFAULT. 262 * Returns 0 on success or -EFAULT.
160 */ 263 */
161static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev, 264static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
162 struct ap_message *ap_msg, 265 struct ap_message *ap_msg,
163 struct ica_rsa_modexpo *mex) 266 struct ica_rsa_modexpo *mex)
164{ 267{
@@ -173,11 +276,6 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
173 .ulen = 10, 276 .ulen = 10,
174 .only_rule = {'M', 'R', 'P', ' ', ' ', ' ', ' ', ' '} 277 .only_rule = {'M', 'R', 'P', ' ', ' ', ' ', ' ', ' '}
175 }; 278 };
176 static struct function_and_rules_block static_pke_fnr_MCL2 = {
177 .function_code = {'P', 'K'},
178 .ulen = 10,
179 .only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'}
180 };
181 struct { 279 struct {
182 struct type6_hdr hdr; 280 struct type6_hdr hdr;
183 struct CPRBX cprbx; 281 struct CPRBX cprbx;
@@ -204,11 +302,10 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
204 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); 302 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
205 303
206 msg->cprbx = static_cprbx; 304 msg->cprbx = static_cprbx;
207 msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid); 305 msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
208 msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1; 306 msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1;
209 307
210 msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ? 308 msg->fr = static_pke_fnr;
211 static_pke_fnr_MCL2 : static_pke_fnr;
212 309
213 msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx); 310 msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
214 311
@@ -219,13 +316,13 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
219/** 316/**
220 * Convert a ICACRT message to a type6 CRT message. 317 * Convert a ICACRT message to a type6 CRT message.
221 * 318 *
222 * @zdev: crypto device pointer 319 * @zq: crypto device pointer
223 * @ap_msg: pointer to AP message 320 * @ap_msg: pointer to AP message
224 * @crt: pointer to user input data 321 * @crt: pointer to user input data
225 * 322 *
226 * Returns 0 on success or -EFAULT. 323 * Returns 0 on success or -EFAULT.
227 */ 324 */
228static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev, 325static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
229 struct ap_message *ap_msg, 326 struct ap_message *ap_msg,
230 struct ica_rsa_modexpo_crt *crt) 327 struct ica_rsa_modexpo_crt *crt)
231{ 328{
@@ -241,11 +338,6 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
241 .only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'} 338 .only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'}
242 }; 339 };
243 340
244 static struct function_and_rules_block static_pkd_fnr_MCL2 = {
245 .function_code = {'P', 'D'},
246 .ulen = 10,
247 .only_rule = {'P', 'K', 'C', 'S', '-', '1', '.', '2'}
248 };
249 struct { 341 struct {
250 struct type6_hdr hdr; 342 struct type6_hdr hdr;
251 struct CPRBX cprbx; 343 struct CPRBX cprbx;
@@ -272,12 +364,11 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
272 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); 364 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
273 365
274 msg->cprbx = static_cprbx; 366 msg->cprbx = static_cprbx;
275 msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid); 367 msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
276 msg->cprbx.req_parml = msg->cprbx.rpl_msgbl = 368 msg->cprbx.req_parml = msg->cprbx.rpl_msgbl =
277 size - sizeof(msg->hdr) - sizeof(msg->cprbx); 369 size - sizeof(msg->hdr) - sizeof(msg->cprbx);
278 370
279 msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ? 371 msg->fr = static_pkd_fnr;
280 static_pkd_fnr_MCL2 : static_pkd_fnr;
281 372
282 ap_msg->length = size; 373 ap_msg->length = size;
283 return 0; 374 return 0;
@@ -286,7 +377,7 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
286/** 377/**
287 * Convert a XCRB message to a type6 CPRB message. 378 * Convert a XCRB message to a type6 CPRB message.
288 * 379 *
289 * @zdev: crypto device pointer 380 * @zq: crypto device pointer
290 * @ap_msg: pointer to AP message 381 * @ap_msg: pointer to AP message
291 * @xcRB: pointer to user input data 382 * @xcRB: pointer to user input data
292 * 383 *
@@ -297,9 +388,10 @@ struct type86_fmt2_msg {
297 struct type86_fmt2_ext fmt2; 388 struct type86_fmt2_ext fmt2;
298} __packed; 389} __packed;
299 390
300static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev, 391static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
301 struct ap_message *ap_msg, 392 struct ica_xcRB *xcRB,
302 struct ica_xcRB *xcRB) 393 unsigned int *fcode,
394 unsigned short **dom)
303{ 395{
304 static struct type6_hdr static_type6_hdrX = { 396 static struct type6_hdr static_type6_hdrX = {
305 .type = 0x06, 397 .type = 0x06,
@@ -379,6 +471,9 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
379 memcpy(msg->hdr.function_code, function_code, 471 memcpy(msg->hdr.function_code, function_code,
380 sizeof(msg->hdr.function_code)); 472 sizeof(msg->hdr.function_code));
381 473
474 *fcode = (msg->hdr.function_code[0] << 8) | msg->hdr.function_code[1];
475 *dom = (unsigned short *)&msg->cprbx.domain;
476
382 if (memcmp(function_code, "US", 2) == 0) 477 if (memcmp(function_code, "US", 2) == 0)
383 ap_msg->special = 1; 478 ap_msg->special = 1;
384 else 479 else
@@ -389,15 +484,15 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
389 copy_from_user(req_data, xcRB->request_data_address, 484 copy_from_user(req_data, xcRB->request_data_address,
390 xcRB->request_data_length)) 485 xcRB->request_data_length))
391 return -EFAULT; 486 return -EFAULT;
487
392 return 0; 488 return 0;
393} 489}
394 490
395static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev, 491static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg,
396 struct ap_message *ap_msg, 492 struct ep11_urb *xcRB,
397 struct ep11_urb *xcRB) 493 unsigned int *fcode)
398{ 494{
399 unsigned int lfmt; 495 unsigned int lfmt;
400
401 static struct type6_hdr static_type6_ep11_hdr = { 496 static struct type6_hdr static_type6_ep11_hdr = {
402 .type = 0x06, 497 .type = 0x06,
403 .rqid = {0x00, 0x01}, 498 .rqid = {0x00, 0x01},
@@ -421,7 +516,7 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev,
421 unsigned char dom_tag; /* fixed value 0x4 */ 516 unsigned char dom_tag; /* fixed value 0x4 */
422 unsigned char dom_len; /* fixed value 0x4 */ 517 unsigned char dom_len; /* fixed value 0x4 */
423 unsigned int dom_val; /* domain id */ 518 unsigned int dom_val; /* domain id */
424 } __packed * payload_hdr; 519 } __packed * payload_hdr = NULL;
425 520
426 if (CEIL4(xcRB->req_len) < xcRB->req_len) 521 if (CEIL4(xcRB->req_len) < xcRB->req_len)
427 return -EINVAL; /* overflow after alignment*/ 522 return -EINVAL; /* overflow after alignment*/
@@ -450,43 +545,30 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev,
450 return -EFAULT; 545 return -EFAULT;
451 } 546 }
452 547
453 /* 548 if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/
454 The target domain field within the cprb body/payload block will be 549 switch (msg->pld_lenfmt & 0x03) {
455 replaced by the usage domain for non-management commands only. 550 case 1:
456 Therefore we check the first bit of the 'flags' parameter for 551 lfmt = 2;
457 management command indication. 552 break;
458 0 - non management command 553 case 2:
459 1 - management command 554 lfmt = 3;
460 */ 555 break;
461 if (!((msg->cprbx.flags & 0x80) == 0x80)) { 556 default:
462 msg->cprbx.target_id = (unsigned int) 557 return -EINVAL;
463 AP_QID_QUEUE(zdev->ap_dev->qid); 558 }
464 559 } else {
465 if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/ 560 lfmt = 1; /* length format #1 */
466 switch (msg->pld_lenfmt & 0x03) {
467 case 1:
468 lfmt = 2;
469 break;
470 case 2:
471 lfmt = 3;
472 break;
473 default:
474 return -EINVAL;
475 }
476 } else {
477 lfmt = 1; /* length format #1 */
478 }
479 payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
480 payload_hdr->dom_val = (unsigned int)
481 AP_QID_QUEUE(zdev->ap_dev->qid);
482 } 561 }
562 payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
563 *fcode = payload_hdr->func_val & 0xFFFF;
564
483 return 0; 565 return 0;
484} 566}
485 567
486/** 568/**
487 * Copy results from a type 86 ICA reply message back to user space. 569 * Copy results from a type 86 ICA reply message back to user space.
488 * 570 *
489 * @zdev: crypto device pointer 571 * @zq: crypto device pointer
490 * @reply: reply AP message. 572 * @reply: reply AP message.
491 * @data: pointer to user output data 573 * @data: pointer to user output data
492 * @length: size of user output data 574 * @length: size of user output data
@@ -508,7 +590,7 @@ struct type86_ep11_reply {
508 struct ep11_cprb cprbx; 590 struct ep11_cprb cprbx;
509} __packed; 591} __packed;
510 592
511static int convert_type86_ica(struct zcrypt_device *zdev, 593static int convert_type86_ica(struct zcrypt_queue *zq,
512 struct ap_message *reply, 594 struct ap_message *reply,
513 char __user *outputdata, 595 char __user *outputdata,
514 unsigned int outputdatalength) 596 unsigned int outputdatalength)
@@ -556,26 +638,37 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
556 service_rc = msg->cprbx.ccp_rtcode; 638 service_rc = msg->cprbx.ccp_rtcode;
557 if (unlikely(service_rc != 0)) { 639 if (unlikely(service_rc != 0)) {
558 service_rs = msg->cprbx.ccp_rscode; 640 service_rs = msg->cprbx.ccp_rscode;
559 if (service_rc == 8 && service_rs == 66) 641 if ((service_rc == 8 && service_rs == 66) ||
560 return -EINVAL; 642 (service_rc == 8 && service_rs == 65) ||
561 if (service_rc == 8 && service_rs == 65) 643 (service_rc == 8 && service_rs == 72) ||
562 return -EINVAL; 644 (service_rc == 8 && service_rs == 770) ||
563 if (service_rc == 8 && service_rs == 770) 645 (service_rc == 12 && service_rs == 769)) {
646 ZCRYPT_DBF(DBF_DEBUG,
647 "device=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
648 AP_QID_CARD(zq->queue->qid),
649 AP_QID_QUEUE(zq->queue->qid),
650 (int) service_rc, (int) service_rs);
564 return -EINVAL; 651 return -EINVAL;
652 }
565 if (service_rc == 8 && service_rs == 783) { 653 if (service_rc == 8 && service_rs == 783) {
566 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; 654 zq->zcard->min_mod_size =
655 PCIXCC_MIN_MOD_SIZE_OLD;
656 ZCRYPT_DBF(DBF_DEBUG,
657 "device=%02x.%04x rc/rs=%d/%d => rc=EAGAIN\n",
658 AP_QID_CARD(zq->queue->qid),
659 AP_QID_QUEUE(zq->queue->qid),
660 (int) service_rc, (int) service_rs);
567 return -EAGAIN; 661 return -EAGAIN;
568 } 662 }
569 if (service_rc == 12 && service_rs == 769) 663 zq->online = 0;
570 return -EINVAL; 664 pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
571 if (service_rc == 8 && service_rs == 72) 665 AP_QID_CARD(zq->queue->qid),
572 return -EINVAL; 666 AP_QID_QUEUE(zq->queue->qid));
573 zdev->online = 0; 667 ZCRYPT_DBF(DBF_ERR,
574 pr_err("Cryptographic device %x failed and was set offline\n", 668 "device=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
575 AP_QID_DEVICE(zdev->ap_dev->qid)); 669 AP_QID_CARD(zq->queue->qid),
576 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 670 AP_QID_QUEUE(zq->queue->qid),
577 AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, 671 (int) service_rc, (int) service_rs);
578 msg->hdr.reply_code);
579 return -EAGAIN; /* repeat the request on a different device. */ 672 return -EAGAIN; /* repeat the request on a different device. */
580 } 673 }
581 data = msg->text; 674 data = msg->text;
@@ -611,13 +704,13 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
611/** 704/**
612 * Copy results from a type 86 XCRB reply message back to user space. 705 * Copy results from a type 86 XCRB reply message back to user space.
613 * 706 *
614 * @zdev: crypto device pointer 707 * @zq: crypto device pointer
615 * @reply: reply AP message. 708 * @reply: reply AP message.
616 * @xcRB: pointer to XCRB 709 * @xcRB: pointer to XCRB
617 * 710 *
618 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. 711 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
619 */ 712 */
620static int convert_type86_xcrb(struct zcrypt_device *zdev, 713static int convert_type86_xcrb(struct zcrypt_queue *zq,
621 struct ap_message *reply, 714 struct ap_message *reply,
622 struct ica_xcRB *xcRB) 715 struct ica_xcRB *xcRB)
623{ 716{
@@ -642,13 +735,13 @@ static int convert_type86_xcrb(struct zcrypt_device *zdev,
642/** 735/**
643 * Copy results from a type 86 EP11 XCRB reply message back to user space. 736 * Copy results from a type 86 EP11 XCRB reply message back to user space.
644 * 737 *
645 * @zdev: crypto device pointer 738 * @zq: crypto device pointer
646 * @reply: reply AP message. 739 * @reply: reply AP message.
647 * @xcRB: pointer to EP11 user request block 740 * @xcRB: pointer to EP11 user request block
648 * 741 *
649 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. 742 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
650 */ 743 */
651static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev, 744static int convert_type86_ep11_xcrb(struct zcrypt_queue *zq,
652 struct ap_message *reply, 745 struct ap_message *reply,
653 struct ep11_urb *xcRB) 746 struct ep11_urb *xcRB)
654{ 747{
@@ -666,7 +759,7 @@ static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev,
666 return 0; 759 return 0;
667} 760}
668 761
669static int convert_type86_rng(struct zcrypt_device *zdev, 762static int convert_type86_rng(struct zcrypt_queue *zq,
670 struct ap_message *reply, 763 struct ap_message *reply,
671 char *buffer) 764 char *buffer)
672{ 765{
@@ -683,104 +776,113 @@ static int convert_type86_rng(struct zcrypt_device *zdev,
683 return msg->fmt2.count2; 776 return msg->fmt2.count2;
684} 777}
685 778
686static int convert_response_ica(struct zcrypt_device *zdev, 779static int convert_response_ica(struct zcrypt_queue *zq,
687 struct ap_message *reply, 780 struct ap_message *reply,
688 char __user *outputdata, 781 char __user *outputdata,
689 unsigned int outputdatalength) 782 unsigned int outputdatalength)
690{ 783{
691 struct type86x_reply *msg = reply->message; 784 struct type86x_reply *msg = reply->message;
692 785
693 /* Response type byte is the second byte in the response. */ 786 switch (msg->hdr.type) {
694 switch (((unsigned char *) reply->message)[1]) {
695 case TYPE82_RSP_CODE: 787 case TYPE82_RSP_CODE:
696 case TYPE88_RSP_CODE: 788 case TYPE88_RSP_CODE:
697 return convert_error(zdev, reply); 789 return convert_error(zq, reply);
698 case TYPE86_RSP_CODE: 790 case TYPE86_RSP_CODE:
699 if (msg->cprbx.ccp_rtcode && 791 if (msg->cprbx.ccp_rtcode &&
700 (msg->cprbx.ccp_rscode == 0x14f) && 792 (msg->cprbx.ccp_rscode == 0x14f) &&
701 (outputdatalength > 256)) { 793 (outputdatalength > 256)) {
702 if (zdev->max_exp_bit_length <= 17) { 794 if (zq->zcard->max_exp_bit_length <= 17) {
703 zdev->max_exp_bit_length = 17; 795 zq->zcard->max_exp_bit_length = 17;
704 return -EAGAIN; 796 return -EAGAIN;
705 } else 797 } else
706 return -EINVAL; 798 return -EINVAL;
707 } 799 }
708 if (msg->hdr.reply_code) 800 if (msg->hdr.reply_code)
709 return convert_error(zdev, reply); 801 return convert_error(zq, reply);
710 if (msg->cprbx.cprb_ver_id == 0x02) 802 if (msg->cprbx.cprb_ver_id == 0x02)
711 return convert_type86_ica(zdev, reply, 803 return convert_type86_ica(zq, reply,
712 outputdata, outputdatalength); 804 outputdata, outputdatalength);
713 /* Fall through, no break, incorrect cprb version is an unknown 805 /* Fall through, no break, incorrect cprb version is an unknown
714 * response */ 806 * response */
715 default: /* Unknown response type, this should NEVER EVER happen */ 807 default: /* Unknown response type, this should NEVER EVER happen */
716 zdev->online = 0; 808 zq->online = 0;
717 pr_err("Cryptographic device %x failed and was set offline\n", 809 pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
718 AP_QID_DEVICE(zdev->ap_dev->qid)); 810 AP_QID_CARD(zq->queue->qid),
719 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", 811 AP_QID_QUEUE(zq->queue->qid));
720 AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); 812 ZCRYPT_DBF(DBF_ERR,
813 "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
814 AP_QID_CARD(zq->queue->qid),
815 AP_QID_QUEUE(zq->queue->qid),
816 (int) msg->hdr.type);
721 return -EAGAIN; /* repeat the request on a different device. */ 817 return -EAGAIN; /* repeat the request on a different device. */
722 } 818 }
723} 819}
724 820
725static int convert_response_xcrb(struct zcrypt_device *zdev, 821static int convert_response_xcrb(struct zcrypt_queue *zq,
726 struct ap_message *reply, 822 struct ap_message *reply,
727 struct ica_xcRB *xcRB) 823 struct ica_xcRB *xcRB)
728{ 824{
729 struct type86x_reply *msg = reply->message; 825 struct type86x_reply *msg = reply->message;
730 826
731 /* Response type byte is the second byte in the response. */ 827 switch (msg->hdr.type) {
732 switch (((unsigned char *) reply->message)[1]) {
733 case TYPE82_RSP_CODE: 828 case TYPE82_RSP_CODE:
734 case TYPE88_RSP_CODE: 829 case TYPE88_RSP_CODE:
735 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ 830 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
736 return convert_error(zdev, reply); 831 return convert_error(zq, reply);
737 case TYPE86_RSP_CODE: 832 case TYPE86_RSP_CODE:
738 if (msg->hdr.reply_code) { 833 if (msg->hdr.reply_code) {
739 memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32)); 834 memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32));
740 return convert_error(zdev, reply); 835 return convert_error(zq, reply);
741 } 836 }
742 if (msg->cprbx.cprb_ver_id == 0x02) 837 if (msg->cprbx.cprb_ver_id == 0x02)
743 return convert_type86_xcrb(zdev, reply, xcRB); 838 return convert_type86_xcrb(zq, reply, xcRB);
744 /* Fall through, no break, incorrect cprb version is an unknown 839 /* Fall through, no break, incorrect cprb version is an unknown
745 * response */ 840 * response */
746 default: /* Unknown response type, this should NEVER EVER happen */ 841 default: /* Unknown response type, this should NEVER EVER happen */
747 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ 842 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
748 zdev->online = 0; 843 zq->online = 0;
749 pr_err("Cryptographic device %x failed and was set offline\n", 844 pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
750 AP_QID_DEVICE(zdev->ap_dev->qid)); 845 AP_QID_CARD(zq->queue->qid),
751 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", 846 AP_QID_QUEUE(zq->queue->qid));
752 AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); 847 ZCRYPT_DBF(DBF_ERR,
848 "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
849 AP_QID_CARD(zq->queue->qid),
850 AP_QID_QUEUE(zq->queue->qid),
851 (int) msg->hdr.type);
753 return -EAGAIN; /* repeat the request on a different device. */ 852 return -EAGAIN; /* repeat the request on a different device. */
754 } 853 }
755} 854}
756 855
757static int convert_response_ep11_xcrb(struct zcrypt_device *zdev, 856static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
758 struct ap_message *reply, struct ep11_urb *xcRB) 857 struct ap_message *reply, struct ep11_urb *xcRB)
759{ 858{
760 struct type86_ep11_reply *msg = reply->message; 859 struct type86_ep11_reply *msg = reply->message;
761 860
762 /* Response type byte is the second byte in the response. */ 861 switch (msg->hdr.type) {
763 switch (((unsigned char *)reply->message)[1]) {
764 case TYPE82_RSP_CODE: 862 case TYPE82_RSP_CODE:
765 case TYPE87_RSP_CODE: 863 case TYPE87_RSP_CODE:
766 return convert_error(zdev, reply); 864 return convert_error(zq, reply);
767 case TYPE86_RSP_CODE: 865 case TYPE86_RSP_CODE:
768 if (msg->hdr.reply_code) 866 if (msg->hdr.reply_code)
769 return convert_error(zdev, reply); 867 return convert_error(zq, reply);
770 if (msg->cprbx.cprb_ver_id == 0x04) 868 if (msg->cprbx.cprb_ver_id == 0x04)
771 return convert_type86_ep11_xcrb(zdev, reply, xcRB); 869 return convert_type86_ep11_xcrb(zq, reply, xcRB);
772 /* Fall through, no break, incorrect cprb version is an unknown resp.*/ 870 /* Fall through, no break, incorrect cprb version is an unknown resp.*/
773 default: /* Unknown response type, this should NEVER EVER happen */ 871 default: /* Unknown response type, this should NEVER EVER happen */
774 zdev->online = 0; 872 zq->online = 0;
775 pr_err("Cryptographic device %x failed and was set offline\n", 873 pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
776 AP_QID_DEVICE(zdev->ap_dev->qid)); 874 AP_QID_CARD(zq->queue->qid),
777 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", 875 AP_QID_QUEUE(zq->queue->qid));
778 AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); 876 ZCRYPT_DBF(DBF_ERR,
877 "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
878 AP_QID_CARD(zq->queue->qid),
879 AP_QID_QUEUE(zq->queue->qid),
880 (int) msg->hdr.type);
779 return -EAGAIN; /* repeat the request on a different device. */ 881 return -EAGAIN; /* repeat the request on a different device. */
780 } 882 }
781} 883}
782 884
783static int convert_response_rng(struct zcrypt_device *zdev, 885static int convert_response_rng(struct zcrypt_queue *zq,
784 struct ap_message *reply, 886 struct ap_message *reply,
785 char *data) 887 char *data)
786{ 888{
@@ -794,15 +896,19 @@ static int convert_response_rng(struct zcrypt_device *zdev,
794 if (msg->hdr.reply_code) 896 if (msg->hdr.reply_code)
795 return -EINVAL; 897 return -EINVAL;
796 if (msg->cprbx.cprb_ver_id == 0x02) 898 if (msg->cprbx.cprb_ver_id == 0x02)
797 return convert_type86_rng(zdev, reply, data); 899 return convert_type86_rng(zq, reply, data);
798 /* Fall through, no break, incorrect cprb version is an unknown 900 /* Fall through, no break, incorrect cprb version is an unknown
799 * response */ 901 * response */
800 default: /* Unknown response type, this should NEVER EVER happen */ 902 default: /* Unknown response type, this should NEVER EVER happen */
801 zdev->online = 0; 903 zq->online = 0;
802 pr_err("Cryptographic device %x failed and was set offline\n", 904 pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
803 AP_QID_DEVICE(zdev->ap_dev->qid)); 905 AP_QID_CARD(zq->queue->qid),
804 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", 906 AP_QID_QUEUE(zq->queue->qid));
805 AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); 907 ZCRYPT_DBF(DBF_ERR,
908 "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
909 AP_QID_CARD(zq->queue->qid),
910 AP_QID_QUEUE(zq->queue->qid),
911 (int) msg->hdr.type);
806 return -EAGAIN; /* repeat the request on a different device. */ 912 return -EAGAIN; /* repeat the request on a different device. */
807 } 913 }
808} 914}
@@ -811,11 +917,11 @@ static int convert_response_rng(struct zcrypt_device *zdev,
811 * This function is called from the AP bus code after a crypto request 917 * This function is called from the AP bus code after a crypto request
812 * "msg" has finished with the reply message "reply". 918 * "msg" has finished with the reply message "reply".
813 * It is called from tasklet context. 919 * It is called from tasklet context.
814 * @ap_dev: pointer to the AP device 920 * @aq: pointer to the AP queue
815 * @msg: pointer to the AP message 921 * @msg: pointer to the AP message
816 * @reply: pointer to the AP reply message 922 * @reply: pointer to the AP reply message
817 */ 923 */
818static void zcrypt_msgtype6_receive(struct ap_device *ap_dev, 924static void zcrypt_msgtype6_receive(struct ap_queue *aq,
819 struct ap_message *msg, 925 struct ap_message *msg,
820 struct ap_message *reply) 926 struct ap_message *reply)
821{ 927{
@@ -860,11 +966,11 @@ out:
860 * This function is called from the AP bus code after a crypto request 966 * This function is called from the AP bus code after a crypto request
861 * "msg" has finished with the reply message "reply". 967 * "msg" has finished with the reply message "reply".
862 * It is called from tasklet context. 968 * It is called from tasklet context.
863 * @ap_dev: pointer to the AP device 969 * @aq: pointer to the AP queue
864 * @msg: pointer to the AP message 970 * @msg: pointer to the AP message
865 * @reply: pointer to the AP reply message 971 * @reply: pointer to the AP reply message
866 */ 972 */
867static void zcrypt_msgtype6_receive_ep11(struct ap_device *ap_dev, 973static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
868 struct ap_message *msg, 974 struct ap_message *msg,
869 struct ap_message *reply) 975 struct ap_message *reply)
870{ 976{
@@ -904,11 +1010,11 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0);
904/** 1010/**
905 * The request distributor calls this function if it picked the PCIXCC/CEX2C 1011 * The request distributor calls this function if it picked the PCIXCC/CEX2C
906 * device to handle a modexpo request. 1012 * device to handle a modexpo request.
907 * @zdev: pointer to zcrypt_device structure that identifies the 1013 * @zq: pointer to zcrypt_queue structure that identifies the
908 * PCIXCC/CEX2C device to the request distributor 1014 * PCIXCC/CEX2C device to the request distributor
909 * @mex: pointer to the modexpo request buffer 1015 * @mex: pointer to the modexpo request buffer
910 */ 1016 */
911static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev, 1017static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
912 struct ica_rsa_modexpo *mex) 1018 struct ica_rsa_modexpo *mex)
913{ 1019{
914 struct ap_message ap_msg; 1020 struct ap_message ap_msg;
@@ -925,21 +1031,21 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev,
925 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 1031 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
926 atomic_inc_return(&zcrypt_step); 1032 atomic_inc_return(&zcrypt_step);
927 ap_msg.private = &resp_type; 1033 ap_msg.private = &resp_type;
928 rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex); 1034 rc = ICAMEX_msg_to_type6MEX_msgX(zq, &ap_msg, mex);
929 if (rc) 1035 if (rc)
930 goto out_free; 1036 goto out_free;
931 init_completion(&resp_type.work); 1037 init_completion(&resp_type.work);
932 ap_queue_message(zdev->ap_dev, &ap_msg); 1038 ap_queue_message(zq->queue, &ap_msg);
933 rc = wait_for_completion_interruptible(&resp_type.work); 1039 rc = wait_for_completion_interruptible(&resp_type.work);
934 if (rc == 0) { 1040 if (rc == 0) {
935 rc = ap_msg.rc; 1041 rc = ap_msg.rc;
936 if (rc == 0) 1042 if (rc == 0)
937 rc = convert_response_ica(zdev, &ap_msg, 1043 rc = convert_response_ica(zq, &ap_msg,
938 mex->outputdata, 1044 mex->outputdata,
939 mex->outputdatalength); 1045 mex->outputdatalength);
940 } else 1046 } else
941 /* Signal pending. */ 1047 /* Signal pending. */
942 ap_cancel_message(zdev->ap_dev, &ap_msg); 1048 ap_cancel_message(zq->queue, &ap_msg);
943out_free: 1049out_free:
944 free_page((unsigned long) ap_msg.message); 1050 free_page((unsigned long) ap_msg.message);
945 return rc; 1051 return rc;
@@ -948,11 +1054,11 @@ out_free:
948/** 1054/**
949 * The request distributor calls this function if it picked the PCIXCC/CEX2C 1055 * The request distributor calls this function if it picked the PCIXCC/CEX2C
950 * device to handle a modexpo_crt request. 1056 * device to handle a modexpo_crt request.
951 * @zdev: pointer to zcrypt_device structure that identifies the 1057 * @zq: pointer to zcrypt_queue structure that identifies the
952 * PCIXCC/CEX2C device to the request distributor 1058 * PCIXCC/CEX2C device to the request distributor
953 * @crt: pointer to the modexpoc_crt request buffer 1059 * @crt: pointer to the modexpoc_crt request buffer
954 */ 1060 */
955static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_device *zdev, 1061static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
956 struct ica_rsa_modexpo_crt *crt) 1062 struct ica_rsa_modexpo_crt *crt)
957{ 1063{
958 struct ap_message ap_msg; 1064 struct ap_message ap_msg;
@@ -969,148 +1075,258 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_device *zdev,
969 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 1075 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
970 atomic_inc_return(&zcrypt_step); 1076 atomic_inc_return(&zcrypt_step);
971 ap_msg.private = &resp_type; 1077 ap_msg.private = &resp_type;
972 rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt); 1078 rc = ICACRT_msg_to_type6CRT_msgX(zq, &ap_msg, crt);
973 if (rc) 1079 if (rc)
974 goto out_free; 1080 goto out_free;
975 init_completion(&resp_type.work); 1081 init_completion(&resp_type.work);
976 ap_queue_message(zdev->ap_dev, &ap_msg); 1082 ap_queue_message(zq->queue, &ap_msg);
977 rc = wait_for_completion_interruptible(&resp_type.work); 1083 rc = wait_for_completion_interruptible(&resp_type.work);
978 if (rc == 0) { 1084 if (rc == 0) {
979 rc = ap_msg.rc; 1085 rc = ap_msg.rc;
980 if (rc == 0) 1086 if (rc == 0)
981 rc = convert_response_ica(zdev, &ap_msg, 1087 rc = convert_response_ica(zq, &ap_msg,
982 crt->outputdata, 1088 crt->outputdata,
983 crt->outputdatalength); 1089 crt->outputdatalength);
984 } else 1090 } else {
985 /* Signal pending. */ 1091 /* Signal pending. */
986 ap_cancel_message(zdev->ap_dev, &ap_msg); 1092 ap_cancel_message(zq->queue, &ap_msg);
1093 }
987out_free: 1094out_free:
988 free_page((unsigned long) ap_msg.message); 1095 free_page((unsigned long) ap_msg.message);
989 return rc; 1096 return rc;
990} 1097}
991 1098
1099unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
1100 struct ap_message *ap_msg,
1101 unsigned int *func_code, unsigned short **dom)
1102{
1103 struct response_type resp_type = {
1104 .type = PCIXCC_RESPONSE_TYPE_XCRB,
1105 };
1106 int rc;
1107
1108 ap_init_message(ap_msg);
1109 ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
1110 if (!ap_msg->message)
1111 return -ENOMEM;
1112 ap_msg->receive = zcrypt_msgtype6_receive;
1113 ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
1114 atomic_inc_return(&zcrypt_step);
1115 ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
1116 if (!ap_msg->private) {
1117 kzfree(ap_msg->message);
1118 return -ENOMEM;
1119 }
1120 memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
1121 rc = XCRB_msg_to_type6CPRB_msgX(ap_msg, xcRB, func_code, dom);
1122 if (rc) {
1123 kzfree(ap_msg->message);
1124 kzfree(ap_msg->private);
1125 }
1126 return rc;
1127}
1128
992/** 1129/**
993 * The request distributor calls this function if it picked the PCIXCC/CEX2C 1130 * The request distributor calls this function if it picked the PCIXCC/CEX2C
994 * device to handle a send_cprb request. 1131 * device to handle a send_cprb request.
995 * @zdev: pointer to zcrypt_device structure that identifies the 1132 * @zq: pointer to zcrypt_queue structure that identifies the
996 * PCIXCC/CEX2C device to the request distributor 1133 * PCIXCC/CEX2C device to the request distributor
997 * @xcRB: pointer to the send_cprb request buffer 1134 * @xcRB: pointer to the send_cprb request buffer
998 */ 1135 */
999static long zcrypt_msgtype6_send_cprb(struct zcrypt_device *zdev, 1136static long zcrypt_msgtype6_send_cprb(struct zcrypt_queue *zq,
1000 struct ica_xcRB *xcRB) 1137 struct ica_xcRB *xcRB,
1138 struct ap_message *ap_msg)
1001{ 1139{
1002 struct ap_message ap_msg;
1003 struct response_type resp_type = {
1004 .type = PCIXCC_RESPONSE_TYPE_XCRB,
1005 };
1006 int rc; 1140 int rc;
1141 struct response_type *rtype = (struct response_type *)(ap_msg->private);
1007 1142
1008 ap_init_message(&ap_msg); 1143 init_completion(&rtype->work);
1009 ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); 1144 ap_queue_message(zq->queue, ap_msg);
1010 if (!ap_msg.message) 1145 rc = wait_for_completion_interruptible(&rtype->work);
1011 return -ENOMEM;
1012 ap_msg.receive = zcrypt_msgtype6_receive;
1013 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
1014 atomic_inc_return(&zcrypt_step);
1015 ap_msg.private = &resp_type;
1016 rc = XCRB_msg_to_type6CPRB_msgX(zdev, &ap_msg, xcRB);
1017 if (rc)
1018 goto out_free;
1019 init_completion(&resp_type.work);
1020 ap_queue_message(zdev->ap_dev, &ap_msg);
1021 rc = wait_for_completion_interruptible(&resp_type.work);
1022 if (rc == 0) { 1146 if (rc == 0) {
1023 rc = ap_msg.rc; 1147 rc = ap_msg->rc;
1024 if (rc == 0) 1148 if (rc == 0)
1025 rc = convert_response_xcrb(zdev, &ap_msg, xcRB); 1149 rc = convert_response_xcrb(zq, ap_msg, xcRB);
1026 } else 1150 } else
1027 /* Signal pending. */ 1151 /* Signal pending. */
1028 ap_cancel_message(zdev->ap_dev, &ap_msg); 1152 ap_cancel_message(zq->queue, ap_msg);
1029out_free: 1153
1030 kzfree(ap_msg.message); 1154 kzfree(ap_msg->message);
1155 kzfree(ap_msg->private);
1156 return rc;
1157}
1158
1159unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
1160 struct ap_message *ap_msg,
1161 unsigned int *func_code)
1162{
1163 struct response_type resp_type = {
1164 .type = PCIXCC_RESPONSE_TYPE_EP11,
1165 };
1166 int rc;
1167
1168 ap_init_message(ap_msg);
1169 ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
1170 if (!ap_msg->message)
1171 return -ENOMEM;
1172 ap_msg->receive = zcrypt_msgtype6_receive_ep11;
1173 ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
1174 atomic_inc_return(&zcrypt_step);
1175 ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
1176 if (!ap_msg->private) {
1177 kzfree(ap_msg->message);
1178 return -ENOMEM;
1179 }
1180 memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
1181 rc = xcrb_msg_to_type6_ep11cprb_msgx(ap_msg, xcrb, func_code);
1182 if (rc) {
1183 kzfree(ap_msg->message);
1184 kzfree(ap_msg->private);
1185 }
1031 return rc; 1186 return rc;
1032} 1187}
1033 1188
1034/** 1189/**
1035 * The request distributor calls this function if it picked the CEX4P 1190 * The request distributor calls this function if it picked the CEX4P
1036 * device to handle a send_ep11_cprb request. 1191 * device to handle a send_ep11_cprb request.
1037 * @zdev: pointer to zcrypt_device structure that identifies the 1192 * @zq: pointer to zcrypt_queue structure that identifies the
1038 * CEX4P device to the request distributor 1193 * CEX4P device to the request distributor
1039 * @xcRB: pointer to the ep11 user request block 1194 * @xcRB: pointer to the ep11 user request block
1040 */ 1195 */
1041static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_device *zdev, 1196static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_queue *zq,
1042 struct ep11_urb *xcrb) 1197 struct ep11_urb *xcrb,
1198 struct ap_message *ap_msg)
1043{ 1199{
1044 struct ap_message ap_msg;
1045 struct response_type resp_type = {
1046 .type = PCIXCC_RESPONSE_TYPE_EP11,
1047 };
1048 int rc; 1200 int rc;
1201 unsigned int lfmt;
1202 struct response_type *rtype = (struct response_type *)(ap_msg->private);
1203 struct {
1204 struct type6_hdr hdr;
1205 struct ep11_cprb cprbx;
1206 unsigned char pld_tag; /* fixed value 0x30 */
1207 unsigned char pld_lenfmt; /* payload length format */
1208 } __packed * msg = ap_msg->message;
1209 struct pld_hdr {
1210 unsigned char func_tag; /* fixed value 0x4 */
1211 unsigned char func_len; /* fixed value 0x4 */
1212 unsigned int func_val; /* function ID */
1213 unsigned char dom_tag; /* fixed value 0x4 */
1214 unsigned char dom_len; /* fixed value 0x4 */
1215 unsigned int dom_val; /* domain id */
1216 } __packed * payload_hdr = NULL;
1049 1217
1050 ap_init_message(&ap_msg); 1218
1051 ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); 1219 /**
1052 if (!ap_msg.message) 1220 * The target domain field within the cprb body/payload block will be
1053 return -ENOMEM; 1221 * replaced by the usage domain for non-management commands only.
1054 ap_msg.receive = zcrypt_msgtype6_receive_ep11; 1222 * Therefore we check the first bit of the 'flags' parameter for
1055 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 1223 * management command indication.
1056 atomic_inc_return(&zcrypt_step); 1224 * 0 - non management command
1057 ap_msg.private = &resp_type; 1225 * 1 - management command
1058 rc = xcrb_msg_to_type6_ep11cprb_msgx(zdev, &ap_msg, xcrb); 1226 */
1059 if (rc) 1227 if (!((msg->cprbx.flags & 0x80) == 0x80)) {
1060 goto out_free; 1228 msg->cprbx.target_id = (unsigned int)
1061 init_completion(&resp_type.work); 1229 AP_QID_QUEUE(zq->queue->qid);
1062 ap_queue_message(zdev->ap_dev, &ap_msg); 1230
1063 rc = wait_for_completion_interruptible(&resp_type.work); 1231 if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/
1232 switch (msg->pld_lenfmt & 0x03) {
1233 case 1:
1234 lfmt = 2;
1235 break;
1236 case 2:
1237 lfmt = 3;
1238 break;
1239 default:
1240 return -EINVAL;
1241 }
1242 } else {
1243 lfmt = 1; /* length format #1 */
1244 }
1245 payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
1246 payload_hdr->dom_val = (unsigned int)
1247 AP_QID_QUEUE(zq->queue->qid);
1248 }
1249
1250 init_completion(&rtype->work);
1251 ap_queue_message(zq->queue, ap_msg);
1252 rc = wait_for_completion_interruptible(&rtype->work);
1064 if (rc == 0) { 1253 if (rc == 0) {
1065 rc = ap_msg.rc; 1254 rc = ap_msg->rc;
1066 if (rc == 0) 1255 if (rc == 0)
1067 rc = convert_response_ep11_xcrb(zdev, &ap_msg, xcrb); 1256 rc = convert_response_ep11_xcrb(zq, ap_msg, xcrb);
1068 } else 1257 } else
1069 /* Signal pending. */ 1258 /* Signal pending. */
1070 ap_cancel_message(zdev->ap_dev, &ap_msg); 1259 ap_cancel_message(zq->queue, ap_msg);
1071 1260
1072out_free: 1261 kzfree(ap_msg->message);
1073 kzfree(ap_msg.message); 1262 kzfree(ap_msg->private);
1074 return rc; 1263 return rc;
1075} 1264}
1076 1265
1266unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code,
1267 unsigned int *domain)
1268{
1269 struct response_type resp_type = {
1270 .type = PCIXCC_RESPONSE_TYPE_XCRB,
1271 };
1272
1273 ap_init_message(ap_msg);
1274 ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
1275 if (!ap_msg->message)
1276 return -ENOMEM;
1277 ap_msg->receive = zcrypt_msgtype6_receive;
1278 ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
1279 atomic_inc_return(&zcrypt_step);
1280 ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
1281 if (!ap_msg->private) {
1282 kzfree(ap_msg->message);
1283 return -ENOMEM;
1284 }
1285 memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
1286
1287 rng_type6CPRB_msgX(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain);
1288
1289 *func_code = HWRNG;
1290 return 0;
1291}
1292
1077/** 1293/**
1078 * The request distributor calls this function if it picked the PCIXCC/CEX2C 1294 * The request distributor calls this function if it picked the PCIXCC/CEX2C
1079 * device to generate random data. 1295 * device to generate random data.
1080 * @zdev: pointer to zcrypt_device structure that identifies the 1296 * @zq: pointer to zcrypt_queue structure that identifies the
1081 * PCIXCC/CEX2C device to the request distributor 1297 * PCIXCC/CEX2C device to the request distributor
1082 * @buffer: pointer to a memory page to return random data 1298 * @buffer: pointer to a memory page to return random data
1083 */ 1299 */
1084 1300static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
1085static long zcrypt_msgtype6_rng(struct zcrypt_device *zdev, 1301 char *buffer, struct ap_message *ap_msg)
1086 char *buffer)
1087{ 1302{
1088 struct ap_message ap_msg; 1303 struct {
1089 struct response_type resp_type = { 1304 struct type6_hdr hdr;
1090 .type = PCIXCC_RESPONSE_TYPE_XCRB, 1305 struct CPRBX cprbx;
1091 }; 1306 char function_code[2];
1307 short int rule_length;
1308 char rule[8];
1309 short int verb_length;
1310 short int key_length;
1311 } __packed * msg = ap_msg->message;
1312 struct response_type *rtype = (struct response_type *)(ap_msg->private);
1092 int rc; 1313 int rc;
1093 1314
1094 ap_init_message(&ap_msg); 1315 msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
1095 ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); 1316
1096 if (!ap_msg.message) 1317 init_completion(&rtype->work);
1097 return -ENOMEM; 1318 ap_queue_message(zq->queue, ap_msg);
1098 ap_msg.receive = zcrypt_msgtype6_receive; 1319 rc = wait_for_completion_interruptible(&rtype->work);
1099 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
1100 atomic_inc_return(&zcrypt_step);
1101 ap_msg.private = &resp_type;
1102 rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE);
1103 init_completion(&resp_type.work);
1104 ap_queue_message(zdev->ap_dev, &ap_msg);
1105 rc = wait_for_completion_interruptible(&resp_type.work);
1106 if (rc == 0) { 1320 if (rc == 0) {
1107 rc = ap_msg.rc; 1321 rc = ap_msg->rc;
1108 if (rc == 0) 1322 if (rc == 0)
1109 rc = convert_response_rng(zdev, &ap_msg, buffer); 1323 rc = convert_response_rng(zq, ap_msg, buffer);
1110 } else 1324 } else
1111 /* Signal pending. */ 1325 /* Signal pending. */
1112 ap_cancel_message(zdev->ap_dev, &ap_msg); 1326 ap_cancel_message(zq->queue, ap_msg);
1113 kfree(ap_msg.message); 1327
1328 kzfree(ap_msg->message);
1329 kzfree(ap_msg->private);
1114 return rc; 1330 return rc;
1115} 1331}
1116 1332
@@ -1145,12 +1361,11 @@ static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = {
1145 .send_ep11_cprb = zcrypt_msgtype6_send_ep11_cprb, 1361 .send_ep11_cprb = zcrypt_msgtype6_send_ep11_cprb,
1146}; 1362};
1147 1363
1148int __init zcrypt_msgtype6_init(void) 1364void __init zcrypt_msgtype6_init(void)
1149{ 1365{
1150 zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops); 1366 zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops);
1151 zcrypt_msgtype_register(&zcrypt_msgtype6_ops); 1367 zcrypt_msgtype_register(&zcrypt_msgtype6_ops);
1152 zcrypt_msgtype_register(&zcrypt_msgtype6_ep11_ops); 1368 zcrypt_msgtype_register(&zcrypt_msgtype6_ep11_ops);
1153 return 0;
1154} 1369}
1155 1370
1156void __exit zcrypt_msgtype6_exit(void) 1371void __exit zcrypt_msgtype6_exit(void)
@@ -1159,6 +1374,3 @@ void __exit zcrypt_msgtype6_exit(void)
1159 zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops); 1374 zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops);
1160 zcrypt_msgtype_unregister(&zcrypt_msgtype6_ep11_ops); 1375 zcrypt_msgtype_unregister(&zcrypt_msgtype6_ep11_ops);
1161} 1376}
1162
1163module_init(zcrypt_msgtype6_init);
1164module_exit(zcrypt_msgtype6_exit);
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
index 207247570623..7a0d5b57821f 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.h
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -116,15 +116,28 @@ struct type86_fmt2_ext {
116 unsigned int offset4; /* 0x00000000 */ 116 unsigned int offset4; /* 0x00000000 */
117} __packed; 117} __packed;
118 118
119unsigned int get_cprb_fc(struct ica_xcRB *, struct ap_message *,
120 unsigned int *, unsigned short **);
121unsigned int get_ep11cprb_fc(struct ep11_urb *, struct ap_message *,
122 unsigned int *);
123unsigned int get_rng_fc(struct ap_message *, int *, unsigned int *);
124
125#define LOW 10
126#define MEDIUM 100
127#define HIGH 500
128
129int speed_idx_cca(int);
130int speed_idx_ep11(int);
131
119/** 132/**
120 * Prepare a type6 CPRB message for random number generation 133 * Prepare a type6 CPRB message for random number generation
121 * 134 *
122 * @ap_dev: AP device pointer 135 * @ap_dev: AP device pointer
123 * @ap_msg: pointer to AP message 136 * @ap_msg: pointer to AP message
124 */ 137 */
125static inline void rng_type6CPRB_msgX(struct ap_device *ap_dev, 138static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg,
126 struct ap_message *ap_msg, 139 unsigned int random_number_length,
127 unsigned random_number_length) 140 unsigned int *domain)
128{ 141{
129 struct { 142 struct {
130 struct type6_hdr hdr; 143 struct type6_hdr hdr;
@@ -156,16 +169,16 @@ static inline void rng_type6CPRB_msgX(struct ap_device *ap_dev,
156 msg->hdr.FromCardLen2 = random_number_length, 169 msg->hdr.FromCardLen2 = random_number_length,
157 msg->cprbx = local_cprbx; 170 msg->cprbx = local_cprbx;
158 msg->cprbx.rpl_datal = random_number_length, 171 msg->cprbx.rpl_datal = random_number_length,
159 msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid);
160 memcpy(msg->function_code, msg->hdr.function_code, 0x02); 172 memcpy(msg->function_code, msg->hdr.function_code, 0x02);
161 msg->rule_length = 0x0a; 173 msg->rule_length = 0x0a;
162 memcpy(msg->rule, "RANDOM ", 8); 174 memcpy(msg->rule, "RANDOM ", 8);
163 msg->verb_length = 0x02; 175 msg->verb_length = 0x02;
164 msg->key_length = 0x02; 176 msg->key_length = 0x02;
165 ap_msg->length = sizeof(*msg); 177 ap_msg->length = sizeof(*msg);
178 *domain = (unsigned short)msg->cprbx.domain;
166} 179}
167 180
168int zcrypt_msgtype6_init(void); 181void zcrypt_msgtype6_init(void);
169void zcrypt_msgtype6_exit(void); 182void zcrypt_msgtype6_exit(void);
170 183
171#endif /* _ZCRYPT_MSGTYPE6_H_ */ 184#endif /* _ZCRYPT_MSGTYPE6_H_ */
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index df8f0c4dacb7..26ceaa696765 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -32,6 +32,7 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/atomic.h> 33#include <linux/atomic.h>
34#include <asm/uaccess.h> 34#include <asm/uaccess.h>
35#include <linux/mod_devicetable.h>
35 36
36#include "ap_bus.h" 37#include "ap_bus.h"
37#include "zcrypt_api.h" 38#include "zcrypt_api.h"
@@ -46,11 +47,6 @@
46#define CEX3C_MIN_MOD_SIZE PCIXCC_MIN_MOD_SIZE 47#define CEX3C_MIN_MOD_SIZE PCIXCC_MIN_MOD_SIZE
47#define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */ 48#define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */
48 49
49#define PCIXCC_MCL2_SPEED_RATING 7870
50#define PCIXCC_MCL3_SPEED_RATING 7870
51#define CEX2C_SPEED_RATING 7000
52#define CEX3C_SPEED_RATING 6500
53
54#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */ 50#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */
55#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */ 51#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
56 52
@@ -67,142 +63,34 @@ struct response_type {
67#define PCIXCC_RESPONSE_TYPE_ICA 0 63#define PCIXCC_RESPONSE_TYPE_ICA 0
68#define PCIXCC_RESPONSE_TYPE_XCRB 1 64#define PCIXCC_RESPONSE_TYPE_XCRB 1
69 65
70static struct ap_device_id zcrypt_pcixcc_ids[] = {
71 { AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) },
72 { AP_DEVICE(AP_DEVICE_TYPE_CEX2C) },
73 { AP_DEVICE(AP_DEVICE_TYPE_CEX3C) },
74 { /* end of list */ },
75};
76
77MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids);
78MODULE_AUTHOR("IBM Corporation"); 66MODULE_AUTHOR("IBM Corporation");
79MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, " \ 67MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, " \
80 "Copyright IBM Corp. 2001, 2012"); 68 "Copyright IBM Corp. 2001, 2012");
81MODULE_LICENSE("GPL"); 69MODULE_LICENSE("GPL");
82 70
83static int zcrypt_pcixcc_probe(struct ap_device *ap_dev); 71static struct ap_device_id zcrypt_pcixcc_card_ids[] = {
84static void zcrypt_pcixcc_remove(struct ap_device *ap_dev); 72 { .dev_type = AP_DEVICE_TYPE_PCIXCC,
85 73 .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
86static struct ap_driver zcrypt_pcixcc_driver = { 74 { .dev_type = AP_DEVICE_TYPE_CEX2C,
87 .probe = zcrypt_pcixcc_probe, 75 .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
88 .remove = zcrypt_pcixcc_remove, 76 { .dev_type = AP_DEVICE_TYPE_CEX3C,
89 .ids = zcrypt_pcixcc_ids, 77 .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
90 .request_timeout = PCIXCC_CLEANUP_TIME, 78 { /* end of list */ },
91}; 79};
92 80
93/** 81MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_card_ids);
94 * Micro-code detection function. Its sends a message to a pcixcc card
95 * to find out the microcode level.
96 * @ap_dev: pointer to the AP device.
97 */
98static int zcrypt_pcixcc_mcl(struct ap_device *ap_dev)
99{
100 static unsigned char msg[] = {
101 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
102 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
103 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
104 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
105 0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,
106 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
107 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,
108 0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
109 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
110 0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
111 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
112 0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
113 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,
114 0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
115 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
116 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
117 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
118 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
119 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
120 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
121 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
122 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
123 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
124 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
125 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
126 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
127 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
128 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
129 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
130 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
131 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
132 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
133 0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,
134 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
135 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
136 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
137 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
138 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
139 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,
140 0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
141 0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,
142 0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
143 0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,
144 0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
145 0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,
146 0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
147 0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,
148 0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
149 0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,
150 0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
151 0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,
152 0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
153 0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,
154 0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
155 0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,
156 0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
157 0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,
158 0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
159 0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,
160 0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
161 0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,
162 0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
163 0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,
164 0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
165 0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,
166 0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
167 0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,
168 0xF1,0x3D,0x93,0x53
169 };
170 unsigned long long psmid;
171 struct CPRBX *cprbx;
172 char *reply;
173 int rc, i;
174
175 reply = (void *) get_zeroed_page(GFP_KERNEL);
176 if (!reply)
177 return -ENOMEM;
178 82
179 rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, msg, sizeof(msg)); 83static struct ap_device_id zcrypt_pcixcc_queue_ids[] = {
180 if (rc) 84 { .dev_type = AP_DEVICE_TYPE_PCIXCC,
181 goto out_free; 85 .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
182 86 { .dev_type = AP_DEVICE_TYPE_CEX2C,
183 /* Wait for the test message to complete. */ 87 .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
184 for (i = 0; i < 6; i++) { 88 { .dev_type = AP_DEVICE_TYPE_CEX3C,
185 msleep(300); 89 .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
186 rc = ap_recv(ap_dev->qid, &psmid, reply, 4096); 90 { /* end of list */ },
187 if (rc == 0 && psmid == 0x0102030405060708ULL) 91};
188 break;
189 }
190
191 if (i >= 6) {
192 /* Got no answer. */
193 rc = -ENODEV;
194 goto out_free;
195 }
196 92
197 cprbx = (struct CPRBX *) (reply + 48); 93MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_queue_ids);
198 if (cprbx->ccp_rtcode == 8 && cprbx->ccp_rscode == 33)
199 rc = ZCRYPT_PCIXCC_MCL2;
200 else
201 rc = ZCRYPT_PCIXCC_MCL3;
202out_free:
203 free_page((unsigned long) reply);
204 return rc;
205}
206 94
207/** 95/**
208 * Large random number detection function. Its sends a message to a pcixcc 96 * Large random number detection function. Its sends a message to a pcixcc
@@ -211,15 +99,25 @@ out_free:
211 * 99 *
212 * Returns 1 if large random numbers are supported, 0 if not and < 0 on error. 100 * Returns 1 if large random numbers are supported, 0 if not and < 0 on error.
213 */ 101 */
214static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev) 102static int zcrypt_pcixcc_rng_supported(struct ap_queue *aq)
215{ 103{
216 struct ap_message ap_msg; 104 struct ap_message ap_msg;
217 unsigned long long psmid; 105 unsigned long long psmid;
106 unsigned int domain;
218 struct { 107 struct {
219 struct type86_hdr hdr; 108 struct type86_hdr hdr;
220 struct type86_fmt2_ext fmt2; 109 struct type86_fmt2_ext fmt2;
221 struct CPRBX cprbx; 110 struct CPRBX cprbx;
222 } __attribute__((packed)) *reply; 111 } __attribute__((packed)) *reply;
112 struct {
113 struct type6_hdr hdr;
114 struct CPRBX cprbx;
115 char function_code[2];
116 short int rule_length;
117 char rule[8];
118 short int verb_length;
119 short int key_length;
120 } __packed * msg;
223 int rc, i; 121 int rc, i;
224 122
225 ap_init_message(&ap_msg); 123 ap_init_message(&ap_msg);
@@ -227,8 +125,12 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
227 if (!ap_msg.message) 125 if (!ap_msg.message)
228 return -ENOMEM; 126 return -ENOMEM;
229 127
230 rng_type6CPRB_msgX(ap_dev, &ap_msg, 4); 128 rng_type6CPRB_msgX(&ap_msg, 4, &domain);
231 rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, ap_msg.message, 129
130 msg = ap_msg.message;
131 msg->cprbx.domain = AP_QID_QUEUE(aq->qid);
132
133 rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.message,
232 ap_msg.length); 134 ap_msg.length);
233 if (rc) 135 if (rc)
234 goto out_free; 136 goto out_free;
@@ -236,7 +138,7 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
236 /* Wait for the test message to complete. */ 138 /* Wait for the test message to complete. */
237 for (i = 0; i < 2 * HZ; i++) { 139 for (i = 0; i < 2 * HZ; i++) {
238 msleep(1000 / HZ); 140 msleep(1000 / HZ);
239 rc = ap_recv(ap_dev->qid, &psmid, ap_msg.message, 4096); 141 rc = ap_recv(aq->qid, &psmid, ap_msg.message, 4096);
240 if (rc == 0 && psmid == 0x0102030405060708ULL) 142 if (rc == 0 && psmid == 0x0102030405060708ULL)
241 break; 143 break;
242 } 144 }
@@ -258,110 +160,168 @@ out_free:
258} 160}
259 161
260/** 162/**
261 * Probe function for PCIXCC/CEX2C cards. It always accepts the AP device 163 * Probe function for PCIXCC/CEX2C card devices. It always accepts the
262 * since the bus_match already checked the hardware type. The PCIXCC 164 * AP device since the bus_match already checked the hardware type. The
263 * cards come in two flavours: micro code level 2 and micro code level 3. 165 * PCIXCC cards come in two flavours: micro code level 2 and micro code
264 * This is checked by sending a test message to the device. 166 * level 3. This is checked by sending a test message to the device.
265 * @ap_dev: pointer to the AP device. 167 * @ap_dev: pointer to the AP card device.
266 */ 168 */
267static int zcrypt_pcixcc_probe(struct ap_device *ap_dev) 169static int zcrypt_pcixcc_card_probe(struct ap_device *ap_dev)
268{ 170{
269 struct zcrypt_device *zdev; 171 /*
172 * Normalized speed ratings per crypto adapter
173 * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
174 */
175 static const int CEX2C_SPEED_IDX[] = {
176 1000, 1400, 2400, 1100, 1500, 2600, 100, 12};
177 static const int CEX3C_SPEED_IDX[] = {
178 500, 700, 1400, 550, 800, 1500, 80, 10};
179
180 struct ap_card *ac = to_ap_card(&ap_dev->device);
181 struct zcrypt_card *zc;
270 int rc = 0; 182 int rc = 0;
271 183
272 zdev = zcrypt_device_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE); 184 zc = zcrypt_card_alloc();
273 if (!zdev) 185 if (!zc)
274 return -ENOMEM; 186 return -ENOMEM;
275 zdev->ap_dev = ap_dev; 187 zc->card = ac;
276 zdev->online = 1; 188 ac->private = zc;
277 switch (ap_dev->device_type) { 189 switch (ac->ap_dev.device_type) {
278 case AP_DEVICE_TYPE_PCIXCC:
279 rc = zcrypt_pcixcc_mcl(ap_dev);
280 if (rc < 0) {
281 zcrypt_device_free(zdev);
282 return rc;
283 }
284 zdev->user_space_type = rc;
285 if (rc == ZCRYPT_PCIXCC_MCL2) {
286 zdev->type_string = "PCIXCC_MCL2";
287 zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING;
288 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
289 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
290 zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
291 } else {
292 zdev->type_string = "PCIXCC_MCL3";
293 zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING;
294 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
295 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
296 zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
297 }
298 break;
299 case AP_DEVICE_TYPE_CEX2C: 190 case AP_DEVICE_TYPE_CEX2C:
300 zdev->user_space_type = ZCRYPT_CEX2C; 191 zc->user_space_type = ZCRYPT_CEX2C;
301 zdev->type_string = "CEX2C"; 192 zc->type_string = "CEX2C";
302 zdev->speed_rating = CEX2C_SPEED_RATING; 193 memcpy(zc->speed_rating, CEX2C_SPEED_IDX,
303 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE; 194 sizeof(CEX2C_SPEED_IDX));
304 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; 195 zc->min_mod_size = PCIXCC_MIN_MOD_SIZE;
305 zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE; 196 zc->max_mod_size = PCIXCC_MAX_MOD_SIZE;
197 zc->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
306 break; 198 break;
307 case AP_DEVICE_TYPE_CEX3C: 199 case AP_DEVICE_TYPE_CEX3C:
308 zdev->user_space_type = ZCRYPT_CEX3C; 200 zc->user_space_type = ZCRYPT_CEX3C;
309 zdev->type_string = "CEX3C"; 201 zc->type_string = "CEX3C";
310 zdev->speed_rating = CEX3C_SPEED_RATING; 202 memcpy(zc->speed_rating, CEX3C_SPEED_IDX,
311 zdev->min_mod_size = CEX3C_MIN_MOD_SIZE; 203 sizeof(CEX3C_SPEED_IDX));
312 zdev->max_mod_size = CEX3C_MAX_MOD_SIZE; 204 zc->min_mod_size = CEX3C_MIN_MOD_SIZE;
313 zdev->max_exp_bit_length = CEX3C_MAX_MOD_SIZE; 205 zc->max_mod_size = CEX3C_MAX_MOD_SIZE;
206 zc->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
314 break; 207 break;
315 default: 208 default:
316 goto out_free; 209 zcrypt_card_free(zc);
210 return -ENODEV;
317 } 211 }
212 zc->online = 1;
213
214 rc = zcrypt_card_register(zc);
215 if (rc) {
216 ac->private = NULL;
217 zcrypt_card_free(zc);
218 }
219
220 return rc;
221}
318 222
319 rc = zcrypt_pcixcc_rng_supported(ap_dev); 223/**
224 * This is called to remove the PCIXCC/CEX2C card driver information
225 * if an AP card device is removed.
226 */
227static void zcrypt_pcixcc_card_remove(struct ap_device *ap_dev)
228{
229 struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
230
231 if (zc)
232 zcrypt_card_unregister(zc);
233}
234
235static struct ap_driver zcrypt_pcixcc_card_driver = {
236 .probe = zcrypt_pcixcc_card_probe,
237 .remove = zcrypt_pcixcc_card_remove,
238 .ids = zcrypt_pcixcc_card_ids,
239};
240
241/**
242 * Probe function for PCIXCC/CEX2C queue devices. It always accepts the
243 * AP device since the bus_match already checked the hardware type. The
244 * PCIXCC cards come in two flavours: micro code level 2 and micro code
245 * level 3. This is checked by sending a test message to the device.
246 * @ap_dev: pointer to the AP card device.
247 */
248static int zcrypt_pcixcc_queue_probe(struct ap_device *ap_dev)
249{
250 struct ap_queue *aq = to_ap_queue(&ap_dev->device);
251 struct zcrypt_queue *zq;
252 int rc;
253
254 zq = zcrypt_queue_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE);
255 if (!zq)
256 return -ENOMEM;
257 zq->queue = aq;
258 zq->online = 1;
259 atomic_set(&zq->load, 0);
260 rc = zcrypt_pcixcc_rng_supported(aq);
320 if (rc < 0) { 261 if (rc < 0) {
321 zcrypt_device_free(zdev); 262 zcrypt_queue_free(zq);
322 return rc; 263 return rc;
323 } 264 }
324 if (rc) 265 if (rc)
325 zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME, 266 zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
326 MSGTYPE06_VARIANT_DEFAULT); 267 MSGTYPE06_VARIANT_DEFAULT);
327 else 268 else
328 zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME, 269 zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
329 MSGTYPE06_VARIANT_NORNG); 270 MSGTYPE06_VARIANT_NORNG);
330 ap_device_init_reply(ap_dev, &zdev->reply); 271 ap_queue_init_reply(aq, &zq->reply);
331 ap_dev->private = zdev; 272 aq->request_timeout = PCIXCC_CLEANUP_TIME,
332 rc = zcrypt_device_register(zdev); 273 aq->private = zq;
333 if (rc) 274 rc = zcrypt_queue_register(zq);
334 goto out_free; 275 if (rc) {
335 return 0; 276 aq->private = NULL;
336 277 zcrypt_queue_free(zq);
337 out_free: 278 }
338 ap_dev->private = NULL;
339 zcrypt_msgtype_release(zdev->ops);
340 zcrypt_device_free(zdev);
341 return rc; 279 return rc;
342} 280}
343 281
344/** 282/**
345 * This is called to remove the extended PCIXCC/CEX2C driver information 283 * This is called to remove the PCIXCC/CEX2C queue driver information
346 * if an AP device is removed. 284 * if an AP queue device is removed.
347 */ 285 */
348static void zcrypt_pcixcc_remove(struct ap_device *ap_dev) 286static void zcrypt_pcixcc_queue_remove(struct ap_device *ap_dev)
349{ 287{
350 struct zcrypt_device *zdev = ap_dev->private; 288 struct ap_queue *aq = to_ap_queue(&ap_dev->device);
351 struct zcrypt_ops *zops = zdev->ops; 289 struct zcrypt_queue *zq = aq->private;
352 290
353 zcrypt_device_unregister(zdev); 291 ap_queue_remove(aq);
354 zcrypt_msgtype_release(zops); 292 if (zq)
293 zcrypt_queue_unregister(zq);
355} 294}
356 295
296static struct ap_driver zcrypt_pcixcc_queue_driver = {
297 .probe = zcrypt_pcixcc_queue_probe,
298 .remove = zcrypt_pcixcc_queue_remove,
299 .suspend = ap_queue_suspend,
300 .resume = ap_queue_resume,
301 .ids = zcrypt_pcixcc_queue_ids,
302};
303
357int __init zcrypt_pcixcc_init(void) 304int __init zcrypt_pcixcc_init(void)
358{ 305{
359 return ap_driver_register(&zcrypt_pcixcc_driver, THIS_MODULE, "pcixcc"); 306 int rc;
307
308 rc = ap_driver_register(&zcrypt_pcixcc_card_driver,
309 THIS_MODULE, "pcixcccard");
310 if (rc)
311 return rc;
312
313 rc = ap_driver_register(&zcrypt_pcixcc_queue_driver,
314 THIS_MODULE, "pcixccqueue");
315 if (rc)
316 ap_driver_unregister(&zcrypt_pcixcc_card_driver);
317
318 return rc;
360} 319}
361 320
362void zcrypt_pcixcc_exit(void) 321void zcrypt_pcixcc_exit(void)
363{ 322{
364 ap_driver_unregister(&zcrypt_pcixcc_driver); 323 ap_driver_unregister(&zcrypt_pcixcc_queue_driver);
324 ap_driver_unregister(&zcrypt_pcixcc_card_driver);
365} 325}
366 326
367module_init(zcrypt_pcixcc_init); 327module_init(zcrypt_pcixcc_init);
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
new file mode 100644
index 000000000000..a303f3b2c328
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -0,0 +1,226 @@
1/*
2 * zcrypt 2.1.0
3 *
4 * Copyright IBM Corp. 2001, 2012
5 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com)
7 * Cornelia Huck <cornelia.huck@de.ibm.com>
8 *
9 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
10 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
11 * Ralph Wuerthner <rwuerthn@de.ibm.com>
12 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 */
24
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/interrupt.h>
28#include <linux/miscdevice.h>
29#include <linux/fs.h>
30#include <linux/proc_fs.h>
31#include <linux/seq_file.h>
32#include <linux/compat.h>
33#include <linux/slab.h>
34#include <linux/atomic.h>
35#include <linux/uaccess.h>
36#include <linux/hw_random.h>
37#include <linux/debugfs.h>
38#include <asm/debug.h>
39
40#include "zcrypt_debug.h"
41#include "zcrypt_api.h"
42
43#include "zcrypt_msgtype6.h"
44#include "zcrypt_msgtype50.h"
45
46/*
47 * Device attributes common for all crypto queue devices.
48 */
49
50static ssize_t zcrypt_queue_online_show(struct device *dev,
51 struct device_attribute *attr,
52 char *buf)
53{
54 struct zcrypt_queue *zq = to_ap_queue(dev)->private;
55
56 return snprintf(buf, PAGE_SIZE, "%d\n", zq->online);
57}
58
59static ssize_t zcrypt_queue_online_store(struct device *dev,
60 struct device_attribute *attr,
61 const char *buf, size_t count)
62{
63 struct zcrypt_queue *zq = to_ap_queue(dev)->private;
64 struct zcrypt_card *zc = zq->zcard;
65 int online;
66
67 if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
68 return -EINVAL;
69
70 if (online && !zc->online)
71 return -EINVAL;
72 zq->online = online;
73
74 ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x online=%d\n",
75 AP_QID_CARD(zq->queue->qid),
76 AP_QID_QUEUE(zq->queue->qid),
77 online);
78
79 if (!online)
80 ap_flush_queue(zq->queue);
81 return count;
82}
83
/* The "online" sysfs attribute (mode 0644) of a crypto queue device. */
static DEVICE_ATTR(online, 0644, zcrypt_queue_online_show,
		   zcrypt_queue_online_store);

/* Attributes common to all crypto queue devices. */
static struct attribute *zcrypt_queue_attrs[] = {
	&dev_attr_online.attr,
	NULL,
};

/* Created/removed in zcrypt_queue_register()/zcrypt_queue_unregister(). */
static struct attribute_group zcrypt_queue_attr_group = {
	.attrs = zcrypt_queue_attrs,
};
95
96void zcrypt_queue_force_online(struct zcrypt_queue *zq, int online)
97{
98 zq->online = online;
99 if (!online)
100 ap_flush_queue(zq->queue);
101}
102
103struct zcrypt_queue *zcrypt_queue_alloc(size_t max_response_size)
104{
105 struct zcrypt_queue *zq;
106
107 zq = kzalloc(sizeof(struct zcrypt_queue), GFP_KERNEL);
108 if (!zq)
109 return NULL;
110 zq->reply.message = kmalloc(max_response_size, GFP_KERNEL);
111 if (!zq->reply.message)
112 goto out_free;
113 zq->reply.length = max_response_size;
114 INIT_LIST_HEAD(&zq->list);
115 kref_init(&zq->refcount);
116 return zq;
117
118out_free:
119 kfree(zq);
120 return NULL;
121}
122EXPORT_SYMBOL(zcrypt_queue_alloc);
123
/*
 * zcrypt_queue_free() - Free a crypto queue device structure.
 * @zq: pointer to the crypto queue device
 *
 * Releases the reply message buffer and the structure itself.
 * Normally reached via the kref release callback, see
 * zcrypt_queue_release().
 */
void zcrypt_queue_free(struct zcrypt_queue *zq)
{
	kfree(zq->reply.message);
	kfree(zq);
}
EXPORT_SYMBOL(zcrypt_queue_free);
130
/* kref release callback: frees the queue when the last reference drops. */
static void zcrypt_queue_release(struct kref *kref)
{
	struct zcrypt_queue *zq =
		container_of(kref, struct zcrypt_queue, refcount);
	zcrypt_queue_free(zq);
}
137
/* Take an additional reference on a crypto queue device. */
void zcrypt_queue_get(struct zcrypt_queue *zq)
{
	kref_get(&zq->refcount);
}
EXPORT_SYMBOL(zcrypt_queue_get);
143
/*
 * Drop a reference on a crypto queue device. Returns nonzero if this
 * was the last reference and the queue has been freed (see
 * zcrypt_queue_release()).
 */
int zcrypt_queue_put(struct zcrypt_queue *zq)
{
	return kref_put(&zq->refcount, zcrypt_queue_release);
}
EXPORT_SYMBOL(zcrypt_queue_put);
149
150/**
151 * zcrypt_queue_register() - Register a crypto queue device.
152 * @zq: Pointer to a crypto queue device
153 *
154 * Register a crypto queue device. Returns 0 if successful.
155 */
156int zcrypt_queue_register(struct zcrypt_queue *zq)
157{
158 struct zcrypt_card *zc;
159 int rc;
160
161 spin_lock(&zcrypt_list_lock);
162 zc = zq->queue->card->private;
163 zcrypt_card_get(zc);
164 zq->zcard = zc;
165 zq->online = 1; /* New devices are online by default. */
166
167 ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x register online=1\n",
168 AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));
169
170 list_add_tail(&zq->list, &zc->zqueues);
171 zcrypt_device_count++;
172 spin_unlock(&zcrypt_list_lock);
173
174 rc = sysfs_create_group(&zq->queue->ap_dev.device.kobj,
175 &zcrypt_queue_attr_group);
176 if (rc)
177 goto out;
178 get_device(&zq->queue->ap_dev.device);
179
180 if (zq->ops->rng) {
181 rc = zcrypt_rng_device_add();
182 if (rc)
183 goto out_unregister;
184 }
185 return 0;
186
187out_unregister:
188 sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
189 &zcrypt_queue_attr_group);
190 put_device(&zq->queue->ap_dev.device);
191out:
192 spin_lock(&zcrypt_list_lock);
193 list_del_init(&zq->list);
194 spin_unlock(&zcrypt_list_lock);
195 zcrypt_card_put(zc);
196 return rc;
197}
198EXPORT_SYMBOL(zcrypt_queue_register);
199
/**
 * zcrypt_queue_unregister(): Unregister a crypto queue device.
 * @zq: Pointer to crypto queue device
 *
 * Unregister a crypto queue device. Reverses the steps done in
 * zcrypt_queue_register(): unlink from the card's queue list, drop
 * the card reference, remove the rng device (if the msgtype ops
 * provided one), remove the sysfs group, and finally drop the device
 * and queue references.
 */
void zcrypt_queue_unregister(struct zcrypt_queue *zq)
{
	struct zcrypt_card *zc;

	ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x unregister\n",
		   AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));

	zc = zq->zcard;
	spin_lock(&zcrypt_list_lock);
	/* Unlink the queue and keep the global device count in sync. */
	list_del_init(&zq->list);
	zcrypt_device_count--;
	spin_unlock(&zcrypt_list_lock);
	zcrypt_card_put(zc);
	if (zq->ops->rng)
		zcrypt_rng_device_remove();
	sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
			   &zcrypt_queue_attr_group);
	/* Drop the device reference taken in zcrypt_queue_register(). */
	put_device(&zq->queue->ap_dev.device);
	/* May free the queue if this was the last reference. */
	zcrypt_queue_put(zq);
}
EXPORT_SYMBOL(zcrypt_queue_unregister);
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index ed84c07f6a51..8a57f0b1242d 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -175,7 +175,8 @@ struct ap_device_id {
175 kernel_ulong_t driver_info; 175 kernel_ulong_t driver_info;
176}; 176};
177 177
178#define AP_DEVICE_ID_MATCH_DEVICE_TYPE 0x01 178#define AP_DEVICE_ID_MATCH_CARD_TYPE 0x01
179#define AP_DEVICE_ID_MATCH_QUEUE_TYPE 0x02
179 180
180/* s390 css bus devices (subchannels) */ 181/* s390 css bus devices (subchannels) */
181struct css_device_id { 182struct css_device_id {