aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-02-28 02:03:04 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-02-28 02:03:04 -0500
commit3f5595e3d0180305cfef9a9c7c6265d7ade85dea (patch)
treeaaeca311f61794ff26be9bcdc3491e0c44ff0d67
parent12dfdfedbf8ce3b1464e2cea80014fa0a92ed3e2 (diff)
parentfb94a687d96c570d46332a4a890f1dcb7310e643 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull more s390 updates from Martin Schwidefsky: "Next to the usual bug fixes (including the TASK_SIZE fix), there is one larger crypto item. It allows to use protected keys with the in-kernel crypto API The protected key support has two parts, the pkey user space API to convert key formats and the paes crypto module that uses a protected key instead of a standard AES key" * 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: s390: TASK_SIZE for kernel threads s390/crypt: Add protected key AES module s390/dasd: fix spelling mistake: "supportet" -> "supported" s390/pkey: Introduce pkey kernel module s390/zcrypt: export additional symbols s390/zcrypt: Rework CONFIG_ZCRYPT Kconfig text. s390/zcrypt: Cleanup leftover module code. s390/nmi: purge tlbs after control register validation s390/nmi: fix order of register validation s390/crypto: Add PCKMO inline function s390/zcrypt: Enable request count reset for cards and queues. s390/mm: use _SEGMENT_ENTRY_EMPTY in the code s390/chsc: Add exception handler for CHSC instruction s390: opt into HAVE_COPY_THREAD_TLS s390: restore address space when returning to user space s390: rename CIF_ASCE to CIF_ASCE_PRIMARY
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/configs/default_defconfig1
-rw-r--r--arch/s390/configs/performance_defconfig1
-rw-r--r--arch/s390/crypto/Makefile2
-rw-r--r--arch/s390/crypto/paes_s390.c619
-rw-r--r--arch/s390/defconfig1
-rw-r--r--arch/s390/include/asm/cpacf.h46
-rw-r--r--arch/s390/include/asm/mmu_context.h4
-rw-r--r--arch/s390/include/asm/pgtable.h14
-rw-r--r--arch/s390/include/asm/pkey.h90
-rw-r--r--arch/s390/include/asm/processor.h19
-rw-r--r--arch/s390/include/asm/uaccess.h23
-rw-r--r--arch/s390/include/uapi/asm/Kbuild1
-rw-r--r--arch/s390/include/uapi/asm/pkey.h112
-rw-r--r--arch/s390/kernel/entry.S33
-rw-r--r--arch/s390/kernel/entry.h1
-rw-r--r--arch/s390/kernel/nmi.c25
-rw-r--r--arch/s390/kernel/process.c18
-rw-r--r--arch/s390/mm/gmap.c6
-rw-r--r--arch/s390/mm/hugetlbpage.c2
-rw-r--r--drivers/crypto/Kconfig32
-rw-r--r--drivers/s390/block/dasd_eckd.c2
-rw-r--r--drivers/s390/cio/ioasm.c8
-rw-r--r--drivers/s390/crypto/Makefile4
-rw-r--r--drivers/s390/crypto/ap_bus.c10
-rw-r--r--drivers/s390/crypto/ap_card.c24
-rw-r--r--drivers/s390/crypto/ap_queue.c21
-rw-r--r--drivers/s390/crypto/pkey_api.c1148
-rw-r--r--drivers/s390/crypto/zcrypt_api.c5
-rw-r--r--drivers/s390/crypto/zcrypt_api.h2
30 files changed, 2182 insertions, 93 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index d5c1073a2584..a2dcef0aacc7 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -134,6 +134,7 @@ config S390
134 select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES 134 select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
135 select HAVE_CMPXCHG_DOUBLE 135 select HAVE_CMPXCHG_DOUBLE
136 select HAVE_CMPXCHG_LOCAL 136 select HAVE_CMPXCHG_LOCAL
137 select HAVE_COPY_THREAD_TLS
137 select HAVE_DEBUG_KMEMLEAK 138 select HAVE_DEBUG_KMEMLEAK
138 select HAVE_DMA_API_DEBUG 139 select HAVE_DMA_API_DEBUG
139 select HAVE_DMA_CONTIGUOUS 140 select HAVE_DMA_CONTIGUOUS
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index e00975361fec..143b1e00b818 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -678,6 +678,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
678CONFIG_CRYPTO_USER_API_RNG=m 678CONFIG_CRYPTO_USER_API_RNG=m
679CONFIG_CRYPTO_USER_API_AEAD=m 679CONFIG_CRYPTO_USER_API_AEAD=m
680CONFIG_ZCRYPT=m 680CONFIG_ZCRYPT=m
681CONFIG_PKEY=m
681CONFIG_CRYPTO_SHA1_S390=m 682CONFIG_CRYPTO_SHA1_S390=m
682CONFIG_CRYPTO_SHA256_S390=m 683CONFIG_CRYPTO_SHA256_S390=m
683CONFIG_CRYPTO_SHA512_S390=m 684CONFIG_CRYPTO_SHA512_S390=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 2cf87343b590..2358bf33c5ef 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -628,6 +628,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
628CONFIG_CRYPTO_USER_API_RNG=m 628CONFIG_CRYPTO_USER_API_RNG=m
629CONFIG_CRYPTO_USER_API_AEAD=m 629CONFIG_CRYPTO_USER_API_AEAD=m
630CONFIG_ZCRYPT=m 630CONFIG_ZCRYPT=m
631CONFIG_PKEY=m
631CONFIG_CRYPTO_SHA1_S390=m 632CONFIG_CRYPTO_SHA1_S390=m
632CONFIG_CRYPTO_SHA256_S390=m 633CONFIG_CRYPTO_SHA256_S390=m
633CONFIG_CRYPTO_SHA512_S390=m 634CONFIG_CRYPTO_SHA512_S390=m
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index d1033de4c4ee..402c530c6da5 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o sha_common.o
6obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o sha_common.o 6obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o sha_common.o
7obj-$(CONFIG_CRYPTO_SHA512_S390) += sha512_s390.o sha_common.o 7obj-$(CONFIG_CRYPTO_SHA512_S390) += sha512_s390.o sha_common.o
8obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o 8obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o
9obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o 9obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o paes_s390.o
10obj-$(CONFIG_S390_PRNG) += prng.o 10obj-$(CONFIG_S390_PRNG) += prng.o
11obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o 11obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o
12obj-$(CONFIG_CRYPTO_CRC32_S390) += crc32-vx_s390.o 12obj-$(CONFIG_CRYPTO_CRC32_S390) += crc32-vx_s390.o
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
new file mode 100644
index 000000000000..d69ea495c4d7
--- /dev/null
+++ b/arch/s390/crypto/paes_s390.c
@@ -0,0 +1,619 @@
1/*
2 * Cryptographic API.
3 *
4 * s390 implementation of the AES Cipher Algorithm with protected keys.
5 *
6 * s390 Version:
7 * Copyright IBM Corp. 2017
8 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
9 * Harald Freudenberger <freude@de.ibm.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License (version 2 only)
13 * as published by the Free Software Foundation.
14 *
15 */
16
17#define KMSG_COMPONENT "paes_s390"
18#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
19
20#include <crypto/aes.h>
21#include <crypto/algapi.h>
22#include <linux/bug.h>
23#include <linux/err.h>
24#include <linux/module.h>
25#include <linux/cpufeature.h>
26#include <linux/init.h>
27#include <linux/spinlock.h>
28#include <crypto/xts.h>
29#include <asm/cpacf.h>
30#include <asm/pkey.h>
31
32static u8 *ctrblk;
33static DEFINE_SPINLOCK(ctrblk_lock);
34
35static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
36
37struct s390_paes_ctx {
38 struct pkey_seckey sk;
39 struct pkey_protkey pk;
40 unsigned long fc;
41};
42
43struct s390_pxts_ctx {
44 struct pkey_seckey sk[2];
45 struct pkey_protkey pk[2];
46 unsigned long fc;
47};
48
/*
 * Convert a secure key into a protected key.
 * The conversion may fail transiently, so it is attempted up to
 * three times before the error is reported to the caller.
 */
static inline int __paes_convert_key(struct pkey_seckey *sk,
                                     struct pkey_protkey *pk)
{
        int retries = 3;
        int ret;

        do {
                ret = pkey_skey2pkey(sk, pk);
        } while (ret != 0 && --retries > 0);

        return ret;
}
63
64static int __paes_set_key(struct s390_paes_ctx *ctx)
65{
66 unsigned long fc;
67
68 if (__paes_convert_key(&ctx->sk, &ctx->pk))
69 return -EINVAL;
70
71 /* Pick the correct function code based on the protected key type */
72 fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 :
73 (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 :
74 (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0;
75
76 /* Check if the function code is available */
77 ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
78
79 return ctx->fc ? 0 : -EINVAL;
80}
81
82static int ecb_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
83 unsigned int key_len)
84{
85 struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
86
87 if (key_len != SECKEYBLOBSIZE)
88 return -EINVAL;
89
90 memcpy(ctx->sk.seckey, in_key, SECKEYBLOBSIZE);
91 if (__paes_set_key(ctx)) {
92 tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
93 return -EINVAL;
94 }
95 return 0;
96}
97
98static int ecb_paes_crypt(struct blkcipher_desc *desc,
99 unsigned long modifier,
100 struct blkcipher_walk *walk)
101{
102 struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
103 unsigned int nbytes, n, k;
104 int ret;
105
106 ret = blkcipher_walk_virt(desc, walk);
107 while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
108 /* only use complete blocks */
109 n = nbytes & ~(AES_BLOCK_SIZE - 1);
110 k = cpacf_km(ctx->fc | modifier, ctx->pk.protkey,
111 walk->dst.virt.addr, walk->src.virt.addr, n);
112 if (k)
113 ret = blkcipher_walk_done(desc, walk, nbytes - k);
114 if (k < n) {
115 if (__paes_set_key(ctx) != 0)
116 return blkcipher_walk_done(desc, walk, -EIO);
117 }
118 }
119 return ret;
120}
121
122static int ecb_paes_encrypt(struct blkcipher_desc *desc,
123 struct scatterlist *dst, struct scatterlist *src,
124 unsigned int nbytes)
125{
126 struct blkcipher_walk walk;
127
128 blkcipher_walk_init(&walk, dst, src, nbytes);
129 return ecb_paes_crypt(desc, CPACF_ENCRYPT, &walk);
130}
131
132static int ecb_paes_decrypt(struct blkcipher_desc *desc,
133 struct scatterlist *dst, struct scatterlist *src,
134 unsigned int nbytes)
135{
136 struct blkcipher_walk walk;
137
138 blkcipher_walk_init(&walk, dst, src, nbytes);
139 return ecb_paes_crypt(desc, CPACF_DECRYPT, &walk);
140}
141
142static struct crypto_alg ecb_paes_alg = {
143 .cra_name = "ecb(paes)",
144 .cra_driver_name = "ecb-paes-s390",
145 .cra_priority = 400, /* combo: aes + ecb */
146 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
147 .cra_blocksize = AES_BLOCK_SIZE,
148 .cra_ctxsize = sizeof(struct s390_paes_ctx),
149 .cra_type = &crypto_blkcipher_type,
150 .cra_module = THIS_MODULE,
151 .cra_list = LIST_HEAD_INIT(ecb_paes_alg.cra_list),
152 .cra_u = {
153 .blkcipher = {
154 .min_keysize = SECKEYBLOBSIZE,
155 .max_keysize = SECKEYBLOBSIZE,
156 .setkey = ecb_paes_set_key,
157 .encrypt = ecb_paes_encrypt,
158 .decrypt = ecb_paes_decrypt,
159 }
160 }
161};
162
163static int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
164{
165 unsigned long fc;
166
167 if (__paes_convert_key(&ctx->sk, &ctx->pk))
168 return -EINVAL;
169
170 /* Pick the correct function code based on the protected key type */
171 fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMC_PAES_128 :
172 (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMC_PAES_192 :
173 (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KMC_PAES_256 : 0;
174
175 /* Check if the function code is available */
176 ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
177
178 return ctx->fc ? 0 : -EINVAL;
179}
180
181static int cbc_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
182 unsigned int key_len)
183{
184 struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
185
186 memcpy(ctx->sk.seckey, in_key, SECKEYBLOBSIZE);
187 if (__cbc_paes_set_key(ctx)) {
188 tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
189 return -EINVAL;
190 }
191 return 0;
192}
193
194static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
195 struct blkcipher_walk *walk)
196{
197 struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
198 unsigned int nbytes, n, k;
199 int ret;
200 struct {
201 u8 iv[AES_BLOCK_SIZE];
202 u8 key[MAXPROTKEYSIZE];
203 } param;
204
205 ret = blkcipher_walk_virt(desc, walk);
206 memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
207 memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
208 while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
209 /* only use complete blocks */
210 n = nbytes & ~(AES_BLOCK_SIZE - 1);
211 k = cpacf_kmc(ctx->fc | modifier, &param,
212 walk->dst.virt.addr, walk->src.virt.addr, n);
213 if (k)
214 ret = blkcipher_walk_done(desc, walk, nbytes - k);
215 if (n < k) {
216 if (__cbc_paes_set_key(ctx) != 0)
217 return blkcipher_walk_done(desc, walk, -EIO);
218 memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
219 }
220 }
221 memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
222 return ret;
223}
224
225static int cbc_paes_encrypt(struct blkcipher_desc *desc,
226 struct scatterlist *dst, struct scatterlist *src,
227 unsigned int nbytes)
228{
229 struct blkcipher_walk walk;
230
231 blkcipher_walk_init(&walk, dst, src, nbytes);
232 return cbc_paes_crypt(desc, 0, &walk);
233}
234
235static int cbc_paes_decrypt(struct blkcipher_desc *desc,
236 struct scatterlist *dst, struct scatterlist *src,
237 unsigned int nbytes)
238{
239 struct blkcipher_walk walk;
240
241 blkcipher_walk_init(&walk, dst, src, nbytes);
242 return cbc_paes_crypt(desc, CPACF_DECRYPT, &walk);
243}
244
245static struct crypto_alg cbc_paes_alg = {
246 .cra_name = "cbc(paes)",
247 .cra_driver_name = "cbc-paes-s390",
248 .cra_priority = 400, /* combo: aes + cbc */
249 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
250 .cra_blocksize = AES_BLOCK_SIZE,
251 .cra_ctxsize = sizeof(struct s390_paes_ctx),
252 .cra_type = &crypto_blkcipher_type,
253 .cra_module = THIS_MODULE,
254 .cra_list = LIST_HEAD_INIT(cbc_paes_alg.cra_list),
255 .cra_u = {
256 .blkcipher = {
257 .min_keysize = SECKEYBLOBSIZE,
258 .max_keysize = SECKEYBLOBSIZE,
259 .ivsize = AES_BLOCK_SIZE,
260 .setkey = cbc_paes_set_key,
261 .encrypt = cbc_paes_encrypt,
262 .decrypt = cbc_paes_decrypt,
263 }
264 }
265};
266
267static int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
268{
269 unsigned long fc;
270
271 if (__paes_convert_key(&ctx->sk[0], &ctx->pk[0]) ||
272 __paes_convert_key(&ctx->sk[1], &ctx->pk[1]))
273 return -EINVAL;
274
275 if (ctx->pk[0].type != ctx->pk[1].type)
276 return -EINVAL;
277
278 /* Pick the correct function code based on the protected key type */
279 fc = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PXTS_128 :
280 (ctx->pk[0].type == PKEY_KEYTYPE_AES_256) ?
281 CPACF_KM_PXTS_256 : 0;
282
283 /* Check if the function code is available */
284 ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
285
286 return ctx->fc ? 0 : -EINVAL;
287}
288
289static int xts_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
290 unsigned int key_len)
291{
292 struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm);
293 u8 ckey[2 * AES_MAX_KEY_SIZE];
294 unsigned int ckey_len;
295
296 memcpy(ctx->sk[0].seckey, in_key, SECKEYBLOBSIZE);
297 memcpy(ctx->sk[1].seckey, in_key + SECKEYBLOBSIZE, SECKEYBLOBSIZE);
298 if (__xts_paes_set_key(ctx)) {
299 tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
300 return -EINVAL;
301 }
302
303 /*
304 * xts_check_key verifies the key length is not odd and makes
305 * sure that the two keys are not the same. This can be done
306 * on the two protected keys as well
307 */
308 ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ?
309 AES_KEYSIZE_128 : AES_KEYSIZE_256;
310 memcpy(ckey, ctx->pk[0].protkey, ckey_len);
311 memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);
312 return xts_check_key(tfm, ckey, 2*ckey_len);
313}
314
315static int xts_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
316 struct blkcipher_walk *walk)
317{
318 struct s390_pxts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
319 unsigned int keylen, offset, nbytes, n, k;
320 int ret;
321 struct {
322 u8 key[MAXPROTKEYSIZE]; /* key + verification pattern */
323 u8 tweak[16];
324 u8 block[16];
325 u8 bit[16];
326 u8 xts[16];
327 } pcc_param;
328 struct {
329 u8 key[MAXPROTKEYSIZE]; /* key + verification pattern */
330 u8 init[16];
331 } xts_param;
332
333 ret = blkcipher_walk_virt(desc, walk);
334 keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
335 offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;
336retry:
337 memset(&pcc_param, 0, sizeof(pcc_param));
338 memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
339 memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
340 cpacf_pcc(ctx->fc, pcc_param.key + offset);
341
342 memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen);
343 memcpy(xts_param.init, pcc_param.xts, 16);
344
345 while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
346 /* only use complete blocks */
347 n = nbytes & ~(AES_BLOCK_SIZE - 1);
348 k = cpacf_km(ctx->fc | modifier, xts_param.key + offset,
349 walk->dst.virt.addr, walk->src.virt.addr, n);
350 if (k)
351 ret = blkcipher_walk_done(desc, walk, nbytes - k);
352 if (k < n) {
353 if (__xts_paes_set_key(ctx) != 0)
354 return blkcipher_walk_done(desc, walk, -EIO);
355 goto retry;
356 }
357 }
358 return ret;
359}
360
361static int xts_paes_encrypt(struct blkcipher_desc *desc,
362 struct scatterlist *dst, struct scatterlist *src,
363 unsigned int nbytes)
364{
365 struct blkcipher_walk walk;
366
367 blkcipher_walk_init(&walk, dst, src, nbytes);
368 return xts_paes_crypt(desc, 0, &walk);
369}
370
371static int xts_paes_decrypt(struct blkcipher_desc *desc,
372 struct scatterlist *dst, struct scatterlist *src,
373 unsigned int nbytes)
374{
375 struct blkcipher_walk walk;
376
377 blkcipher_walk_init(&walk, dst, src, nbytes);
378 return xts_paes_crypt(desc, CPACF_DECRYPT, &walk);
379}
380
381static struct crypto_alg xts_paes_alg = {
382 .cra_name = "xts(paes)",
383 .cra_driver_name = "xts-paes-s390",
384 .cra_priority = 400, /* combo: aes + xts */
385 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
386 .cra_blocksize = AES_BLOCK_SIZE,
387 .cra_ctxsize = sizeof(struct s390_pxts_ctx),
388 .cra_type = &crypto_blkcipher_type,
389 .cra_module = THIS_MODULE,
390 .cra_list = LIST_HEAD_INIT(xts_paes_alg.cra_list),
391 .cra_u = {
392 .blkcipher = {
393 .min_keysize = 2 * SECKEYBLOBSIZE,
394 .max_keysize = 2 * SECKEYBLOBSIZE,
395 .ivsize = AES_BLOCK_SIZE,
396 .setkey = xts_paes_set_key,
397 .encrypt = xts_paes_encrypt,
398 .decrypt = xts_paes_decrypt,
399 }
400 }
401};
402
403static int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
404{
405 unsigned long fc;
406
407 if (__paes_convert_key(&ctx->sk, &ctx->pk))
408 return -EINVAL;
409
410 /* Pick the correct function code based on the protected key type */
411 fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMCTR_PAES_128 :
412 (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMCTR_PAES_192 :
413 (ctx->pk.type == PKEY_KEYTYPE_AES_256) ?
414 CPACF_KMCTR_PAES_256 : 0;
415
416 /* Check if the function code is available */
417 ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
418
419 return ctx->fc ? 0 : -EINVAL;
420}
421
422static int ctr_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
423 unsigned int key_len)
424{
425 struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
426
427 memcpy(ctx->sk.seckey, in_key, key_len);
428 if (__ctr_paes_set_key(ctx)) {
429 tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
430 return -EINVAL;
431 }
432 return 0;
433}
434
435static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
436{
437 unsigned int i, n;
438
439 /* only use complete blocks, max. PAGE_SIZE */
440 memcpy(ctrptr, iv, AES_BLOCK_SIZE);
441 n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
442 for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
443 memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
444 crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
445 ctrptr += AES_BLOCK_SIZE;
446 }
447 return n;
448}
449
450static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
451 struct blkcipher_walk *walk)
452{
453 struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
454 u8 buf[AES_BLOCK_SIZE], *ctrptr;
455 unsigned int nbytes, n, k;
456 int ret, locked;
457
458 locked = spin_trylock(&ctrblk_lock);
459
460 ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
461 while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
462 n = AES_BLOCK_SIZE;
463 if (nbytes >= 2*AES_BLOCK_SIZE && locked)
464 n = __ctrblk_init(ctrblk, walk->iv, nbytes);
465 ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
466 k = cpacf_kmctr(ctx->fc | modifier, ctx->pk.protkey,
467 walk->dst.virt.addr, walk->src.virt.addr,
468 n, ctrptr);
469 if (k) {
470 if (ctrptr == ctrblk)
471 memcpy(walk->iv, ctrptr + k - AES_BLOCK_SIZE,
472 AES_BLOCK_SIZE);
473 crypto_inc(walk->iv, AES_BLOCK_SIZE);
474 ret = blkcipher_walk_done(desc, walk, nbytes - n);
475 }
476 if (k < n) {
477 if (__ctr_paes_set_key(ctx) != 0)
478 return blkcipher_walk_done(desc, walk, -EIO);
479 }
480 }
481 if (locked)
482 spin_unlock(&ctrblk_lock);
483 /*
484 * final block may be < AES_BLOCK_SIZE, copy only nbytes
485 */
486 if (nbytes) {
487 while (1) {
488 if (cpacf_kmctr(ctx->fc | modifier,
489 ctx->pk.protkey, buf,
490 walk->src.virt.addr, AES_BLOCK_SIZE,
491 walk->iv) == AES_BLOCK_SIZE)
492 break;
493 if (__ctr_paes_set_key(ctx) != 0)
494 return blkcipher_walk_done(desc, walk, -EIO);
495 }
496 memcpy(walk->dst.virt.addr, buf, nbytes);
497 crypto_inc(walk->iv, AES_BLOCK_SIZE);
498 ret = blkcipher_walk_done(desc, walk, 0);
499 }
500
501 return ret;
502}
503
504static int ctr_paes_encrypt(struct blkcipher_desc *desc,
505 struct scatterlist *dst, struct scatterlist *src,
506 unsigned int nbytes)
507{
508 struct blkcipher_walk walk;
509
510 blkcipher_walk_init(&walk, dst, src, nbytes);
511 return ctr_paes_crypt(desc, 0, &walk);
512}
513
514static int ctr_paes_decrypt(struct blkcipher_desc *desc,
515 struct scatterlist *dst, struct scatterlist *src,
516 unsigned int nbytes)
517{
518 struct blkcipher_walk walk;
519
520 blkcipher_walk_init(&walk, dst, src, nbytes);
521 return ctr_paes_crypt(desc, CPACF_DECRYPT, &walk);
522}
523
524static struct crypto_alg ctr_paes_alg = {
525 .cra_name = "ctr(paes)",
526 .cra_driver_name = "ctr-paes-s390",
527 .cra_priority = 400, /* combo: aes + ctr */
528 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
529 .cra_blocksize = 1,
530 .cra_ctxsize = sizeof(struct s390_paes_ctx),
531 .cra_type = &crypto_blkcipher_type,
532 .cra_module = THIS_MODULE,
533 .cra_list = LIST_HEAD_INIT(ctr_paes_alg.cra_list),
534 .cra_u = {
535 .blkcipher = {
536 .min_keysize = SECKEYBLOBSIZE,
537 .max_keysize = SECKEYBLOBSIZE,
538 .ivsize = AES_BLOCK_SIZE,
539 .setkey = ctr_paes_set_key,
540 .encrypt = ctr_paes_encrypt,
541 .decrypt = ctr_paes_decrypt,
542 }
543 }
544};
545
546static inline void __crypto_unregister_alg(struct crypto_alg *alg)
547{
548 if (!list_empty(&alg->cra_list))
549 crypto_unregister_alg(alg);
550}
551
552static void paes_s390_fini(void)
553{
554 if (ctrblk)
555 free_page((unsigned long) ctrblk);
556 __crypto_unregister_alg(&ctr_paes_alg);
557 __crypto_unregister_alg(&xts_paes_alg);
558 __crypto_unregister_alg(&cbc_paes_alg);
559 __crypto_unregister_alg(&ecb_paes_alg);
560}
561
562static int __init paes_s390_init(void)
563{
564 int ret;
565
566 /* Query available functions for KM, KMC and KMCTR */
567 cpacf_query(CPACF_KM, &km_functions);
568 cpacf_query(CPACF_KMC, &kmc_functions);
569 cpacf_query(CPACF_KMCTR, &kmctr_functions);
570
571 if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) ||
572 cpacf_test_func(&km_functions, CPACF_KM_PAES_192) ||
573 cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) {
574 ret = crypto_register_alg(&ecb_paes_alg);
575 if (ret)
576 goto out_err;
577 }
578
579 if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
580 cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
581 cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) {
582 ret = crypto_register_alg(&cbc_paes_alg);
583 if (ret)
584 goto out_err;
585 }
586
587 if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) ||
588 cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) {
589 ret = crypto_register_alg(&xts_paes_alg);
590 if (ret)
591 goto out_err;
592 }
593
594 if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) ||
595 cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) ||
596 cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
597 ret = crypto_register_alg(&ctr_paes_alg);
598 if (ret)
599 goto out_err;
600 ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
601 if (!ctrblk) {
602 ret = -ENOMEM;
603 goto out_err;
604 }
605 }
606
607 return 0;
608out_err:
609 paes_s390_fini();
610 return ret;
611}
612
613module_init(paes_s390_init);
614module_exit(paes_s390_fini);
615
616MODULE_ALIAS_CRYPTO("aes-all");
617
618MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm with protected keys");
619MODULE_LICENSE("GPL");
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index d00e368fb5e6..68bfd09f1b02 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -229,6 +229,7 @@ CONFIG_CRYPTO_USER_API_HASH=m
229CONFIG_CRYPTO_USER_API_SKCIPHER=m 229CONFIG_CRYPTO_USER_API_SKCIPHER=m
230CONFIG_CRYPTO_USER_API_RNG=m 230CONFIG_CRYPTO_USER_API_RNG=m
231CONFIG_ZCRYPT=m 231CONFIG_ZCRYPT=m
232CONFIG_PKEY=m
232CONFIG_CRYPTO_SHA1_S390=m 233CONFIG_CRYPTO_SHA1_S390=m
233CONFIG_CRYPTO_SHA256_S390=m 234CONFIG_CRYPTO_SHA256_S390=m
234CONFIG_CRYPTO_SHA512_S390=m 235CONFIG_CRYPTO_SHA512_S390=m
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index 2c680db7e5c1..e2dfbf280d12 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -28,8 +28,9 @@
28#define CPACF_PPNO 0xb93c /* MSA5 */ 28#define CPACF_PPNO 0xb93c /* MSA5 */
29 29
30/* 30/*
31 * Decryption modifier bit 31 * En/decryption modifier bits
32 */ 32 */
33#define CPACF_ENCRYPT 0x00
33#define CPACF_DECRYPT 0x80 34#define CPACF_DECRYPT 0x80
34 35
35/* 36/*
@@ -42,8 +43,13 @@
42#define CPACF_KM_AES_128 0x12 43#define CPACF_KM_AES_128 0x12
43#define CPACF_KM_AES_192 0x13 44#define CPACF_KM_AES_192 0x13
44#define CPACF_KM_AES_256 0x14 45#define CPACF_KM_AES_256 0x14
46#define CPACF_KM_PAES_128 0x1a
47#define CPACF_KM_PAES_192 0x1b
48#define CPACF_KM_PAES_256 0x1c
45#define CPACF_KM_XTS_128 0x32 49#define CPACF_KM_XTS_128 0x32
46#define CPACF_KM_XTS_256 0x34 50#define CPACF_KM_XTS_256 0x34
51#define CPACF_KM_PXTS_128 0x3a
52#define CPACF_KM_PXTS_256 0x3c
47 53
48/* 54/*
49 * Function codes for the KMC (CIPHER MESSAGE WITH CHAINING) 55 * Function codes for the KMC (CIPHER MESSAGE WITH CHAINING)
@@ -56,6 +62,9 @@
56#define CPACF_KMC_AES_128 0x12 62#define CPACF_KMC_AES_128 0x12
57#define CPACF_KMC_AES_192 0x13 63#define CPACF_KMC_AES_192 0x13
58#define CPACF_KMC_AES_256 0x14 64#define CPACF_KMC_AES_256 0x14
65#define CPACF_KMC_PAES_128 0x1a
66#define CPACF_KMC_PAES_192 0x1b
67#define CPACF_KMC_PAES_256 0x1c
59#define CPACF_KMC_PRNG 0x43 68#define CPACF_KMC_PRNG 0x43
60 69
61/* 70/*
@@ -69,6 +78,9 @@
69#define CPACF_KMCTR_AES_128 0x12 78#define CPACF_KMCTR_AES_128 0x12
70#define CPACF_KMCTR_AES_192 0x13 79#define CPACF_KMCTR_AES_192 0x13
71#define CPACF_KMCTR_AES_256 0x14 80#define CPACF_KMCTR_AES_256 0x14
81#define CPACF_KMCTR_PAES_128 0x1a
82#define CPACF_KMCTR_PAES_192 0x1b
83#define CPACF_KMCTR_PAES_256 0x1c
72 84
73/* 85/*
74 * Function codes for the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) 86 * Function codes for the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
@@ -99,6 +111,18 @@
99#define CPACF_KMAC_TDEA_192 0x03 111#define CPACF_KMAC_TDEA_192 0x03
100 112
101/* 113/*
114 * Function codes for the PCKMO (PERFORM CRYPTOGRAPHIC KEY MANAGEMENT)
115 * instruction
116 */
117#define CPACF_PCKMO_QUERY 0x00
118#define CPACF_PCKMO_ENC_DES_KEY 0x01
119#define CPACF_PCKMO_ENC_TDES_128_KEY 0x02
120#define CPACF_PCKMO_ENC_TDES_192_KEY 0x03
121#define CPACF_PCKMO_ENC_AES_128_KEY 0x12
122#define CPACF_PCKMO_ENC_AES_192_KEY 0x13
123#define CPACF_PCKMO_ENC_AES_256_KEY 0x14
124
125/*
102 * Function codes for the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION) 126 * Function codes for the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION)
103 * instruction 127 * instruction
104 */ 128 */
@@ -397,4 +421,24 @@ static inline void cpacf_pcc(unsigned long func, void *param)
397 : "cc", "memory"); 421 : "cc", "memory");
398} 422}
399 423
424/**
425 * cpacf_pckmo() - executes the PCKMO (PERFORM CRYPTOGRAPHIC KEY
426 * MANAGEMENT) instruction
427 * @func: the function code passed to PCKMO; see CPACF_PCKMO_xxx defines
428 * @param: address of parameter block; see POP for details on each func
429 *
430 * Returns 0.
431 */
432static inline void cpacf_pckmo(long func, void *param)
433{
434 register unsigned long r0 asm("0") = (unsigned long) func;
435 register unsigned long r1 asm("1") = (unsigned long) param;
436
437 asm volatile(
438 " .insn rre,%[opc] << 16,0,0\n" /* PCKMO opcode */
439 :
440 : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_PCKMO)
441 : "cc", "memory");
442}
443
400#endif /* _ASM_S390_CPACF_H */ 444#endif /* _ASM_S390_CPACF_H */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 67f7a991c929..9b828c073176 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -63,7 +63,7 @@ static inline void set_user_asce(struct mm_struct *mm)
63 S390_lowcore.user_asce = mm->context.asce; 63 S390_lowcore.user_asce = mm->context.asce;
64 if (current->thread.mm_segment.ar4) 64 if (current->thread.mm_segment.ar4)
65 __ctl_load(S390_lowcore.user_asce, 7, 7); 65 __ctl_load(S390_lowcore.user_asce, 7, 7);
66 set_cpu_flag(CIF_ASCE); 66 set_cpu_flag(CIF_ASCE_PRIMARY);
67} 67}
68 68
69static inline void clear_user_asce(void) 69static inline void clear_user_asce(void)
@@ -81,7 +81,7 @@ static inline void load_kernel_asce(void)
81 __ctl_store(asce, 1, 1); 81 __ctl_store(asce, 1, 1);
82 if (asce != S390_lowcore.kernel_asce) 82 if (asce != S390_lowcore.kernel_asce)
83 __ctl_load(S390_lowcore.kernel_asce, 1, 1); 83 __ctl_load(S390_lowcore.kernel_asce, 1, 1);
84 set_cpu_flag(CIF_ASCE); 84 set_cpu_flag(CIF_ASCE_PRIMARY);
85} 85}
86 86
87static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 87static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 52511866fb14..7ed1972b1920 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -640,12 +640,12 @@ static inline int pud_bad(pud_t pud)
640 640
641static inline int pmd_present(pmd_t pmd) 641static inline int pmd_present(pmd_t pmd)
642{ 642{
643 return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID; 643 return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
644} 644}
645 645
646static inline int pmd_none(pmd_t pmd) 646static inline int pmd_none(pmd_t pmd)
647{ 647{
648 return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID; 648 return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
649} 649}
650 650
651static inline unsigned long pmd_pfn(pmd_t pmd) 651static inline unsigned long pmd_pfn(pmd_t pmd)
@@ -803,7 +803,7 @@ static inline void pud_clear(pud_t *pud)
803 803
804static inline void pmd_clear(pmd_t *pmdp) 804static inline void pmd_clear(pmd_t *pmdp)
805{ 805{
806 pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID; 806 pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
807} 807}
808 808
809static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 809static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
@@ -1357,7 +1357,7 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
1357static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, 1357static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
1358 unsigned long addr, pmd_t *pmdp) 1358 unsigned long addr, pmd_t *pmdp)
1359{ 1359{
1360 return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID)); 1360 return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
1361} 1361}
1362 1362
1363#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL 1363#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
@@ -1367,10 +1367,10 @@ static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
1367{ 1367{
1368 if (full) { 1368 if (full) {
1369 pmd_t pmd = *pmdp; 1369 pmd_t pmd = *pmdp;
1370 *pmdp = __pmd(_SEGMENT_ENTRY_INVALID); 1370 *pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
1371 return pmd; 1371 return pmd;
1372 } 1372 }
1373 return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID)); 1373 return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
1374} 1374}
1375 1375
1376#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH 1376#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
@@ -1384,7 +1384,7 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
1384static inline void pmdp_invalidate(struct vm_area_struct *vma, 1384static inline void pmdp_invalidate(struct vm_area_struct *vma,
1385 unsigned long addr, pmd_t *pmdp) 1385 unsigned long addr, pmd_t *pmdp)
1386{ 1386{
1387 pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID)); 1387 pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
1388} 1388}
1389 1389
1390#define __HAVE_ARCH_PMDP_SET_WRPROTECT 1390#define __HAVE_ARCH_PMDP_SET_WRPROTECT
diff --git a/arch/s390/include/asm/pkey.h b/arch/s390/include/asm/pkey.h
new file mode 100644
index 000000000000..b48aef4188f6
--- /dev/null
+++ b/arch/s390/include/asm/pkey.h
@@ -0,0 +1,90 @@
1/*
2 * Kernelspace interface to the pkey device driver
3 *
4 * Copyright IBM Corp. 2016
5 *
6 * Author: Harald Freudenberger <freude@de.ibm.com>
7 *
8 */
9
10#ifndef _KAPI_PKEY_H
11#define _KAPI_PKEY_H
12
13#include <linux/ioctl.h>
14#include <linux/types.h>
15#include <uapi/asm/pkey.h>
16
17/*
18 * Generate (AES) random secure key.
19 * @param cardnr may be -1 (use default card)
20 * @param domain may be -1 (use default domain)
21 * @param keytype one of the PKEY_KEYTYPE values
22 * @param seckey pointer to buffer receiving the secure key
23 * @return 0 on success, negative errno value on failure
24 */
25int pkey_genseckey(__u16 cardnr, __u16 domain,
26 __u32 keytype, struct pkey_seckey *seckey);
27
28/*
29 * Generate (AES) secure key with given key value.
30 * @param cardnr may be -1 (use default card)
31 * @param domain may be -1 (use default domain)
32 * @param keytype one of the PKEY_KEYTYPE values
33 * @param clrkey pointer to buffer with clear key data
34 * @param seckey pointer to buffer receiving the secure key
35 * @return 0 on success, negative errno value on failure
36 */
37int pkey_clr2seckey(__u16 cardnr, __u16 domain, __u32 keytype,
38 const struct pkey_clrkey *clrkey,
39 struct pkey_seckey *seckey);
40
41/*
 42 * Derive (AES) protected key from the (AES) secure key blob.
43 * @param cardnr may be -1 (use default card)
44 * @param domain may be -1 (use default domain)
45 * @param seckey pointer to buffer with the input secure key
46 * @param protkey pointer to buffer receiving the protected key and
47 * additional info (type, length)
48 * @return 0 on success, negative errno value on failure
49 */
50int pkey_sec2protkey(__u16 cardnr, __u16 domain,
51 const struct pkey_seckey *seckey,
52 struct pkey_protkey *protkey);
53
54/*
55 * Derive (AES) protected key from a given clear key value.
56 * @param keytype one of the PKEY_KEYTYPE values
57 * @param clrkey pointer to buffer with clear key data
58 * @param protkey pointer to buffer receiving the protected key and
59 * additional info (type, length)
60 * @return 0 on success, negative errno value on failure
61 */
62int pkey_clr2protkey(__u32 keytype,
63 const struct pkey_clrkey *clrkey,
64 struct pkey_protkey *protkey);
65
66/*
67 * Search for a matching crypto card based on the Master Key
68 * Verification Pattern provided inside a secure key.
69 * @param seckey pointer to buffer with the input secure key
70 * @param cardnr pointer to cardnr, receives the card number on success
71 * @param domain pointer to domain, receives the domain number on success
72 * @param verify if set, always verify by fetching verification pattern
73 * from card
74 * @return 0 on success, negative errno value on failure. If no card could be
75 * found, -ENODEV is returned.
76 */
77int pkey_findcard(const struct pkey_seckey *seckey,
78 __u16 *cardnr, __u16 *domain, int verify);
79
80/*
81 * Find card and transform secure key to protected key.
82 * @param seckey pointer to buffer with the input secure key
83 * @param protkey pointer to buffer receiving the protected key and
84 * additional info (type, length)
85 * @return 0 on success, negative errno value on failure
86 */
87int pkey_skey2pkey(const struct pkey_seckey *seckey,
88 struct pkey_protkey *protkey);
89
90#endif /* _KAPI_PKEY_H */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index dacba341e475..e4988710aa86 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -14,14 +14,16 @@
14#include <linux/const.h> 14#include <linux/const.h>
15 15
16#define CIF_MCCK_PENDING 0 /* machine check handling is pending */ 16#define CIF_MCCK_PENDING 0 /* machine check handling is pending */
17#define CIF_ASCE 1 /* user asce needs fixup / uaccess */ 17#define CIF_ASCE_PRIMARY 1 /* primary asce needs fixup / uaccess */
18#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */ 18#define CIF_ASCE_SECONDARY 2 /* secondary asce needs fixup / uaccess */
19#define CIF_FPU 3 /* restore FPU registers */ 19#define CIF_NOHZ_DELAY 3 /* delay HZ disable for a tick */
20#define CIF_IGNORE_IRQ 4 /* ignore interrupt (for udelay) */ 20#define CIF_FPU 4 /* restore FPU registers */
21#define CIF_ENABLED_WAIT 5 /* in enabled wait state */ 21#define CIF_IGNORE_IRQ 5 /* ignore interrupt (for udelay) */
22#define CIF_ENABLED_WAIT 6 /* in enabled wait state */
22 23
23#define _CIF_MCCK_PENDING _BITUL(CIF_MCCK_PENDING) 24#define _CIF_MCCK_PENDING _BITUL(CIF_MCCK_PENDING)
24#define _CIF_ASCE _BITUL(CIF_ASCE) 25#define _CIF_ASCE_PRIMARY _BITUL(CIF_ASCE_PRIMARY)
26#define _CIF_ASCE_SECONDARY _BITUL(CIF_ASCE_SECONDARY)
25#define _CIF_NOHZ_DELAY _BITUL(CIF_NOHZ_DELAY) 27#define _CIF_NOHZ_DELAY _BITUL(CIF_NOHZ_DELAY)
26#define _CIF_FPU _BITUL(CIF_FPU) 28#define _CIF_FPU _BITUL(CIF_FPU)
27#define _CIF_IGNORE_IRQ _BITUL(CIF_IGNORE_IRQ) 29#define _CIF_IGNORE_IRQ _BITUL(CIF_IGNORE_IRQ)
@@ -89,7 +91,8 @@ extern void execve_tail(void);
89 * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. 91 * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
90 */ 92 */
91 93
92#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit) 94#define TASK_SIZE_OF(tsk) ((tsk)->mm ? \
95 (tsk)->mm->context.asce_limit : TASK_MAX_SIZE)
93#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ 96#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
94 (1UL << 30) : (1UL << 41)) 97 (1UL << 30) : (1UL << 41))
95#define TASK_SIZE TASK_SIZE_OF(current) 98#define TASK_SIZE TASK_SIZE_OF(current)
@@ -200,10 +203,12 @@ struct stack_frame {
200struct task_struct; 203struct task_struct;
201struct mm_struct; 204struct mm_struct;
202struct seq_file; 205struct seq_file;
206struct pt_regs;
203 207
204typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable); 208typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
205void dump_trace(dump_trace_func_t func, void *data, 209void dump_trace(dump_trace_func_t func, void *data,
206 struct task_struct *task, unsigned long sp); 210 struct task_struct *task, unsigned long sp);
211void show_registers(struct pt_regs *regs);
207 212
208void show_cacheinfo(struct seq_file *m); 213void show_cacheinfo(struct seq_file *m);
209 214
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index b2988fc60f65..136932ff4250 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -14,6 +14,7 @@
14 */ 14 */
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <linux/errno.h> 16#include <linux/errno.h>
17#include <asm/processor.h>
17#include <asm/ctl_reg.h> 18#include <asm/ctl_reg.h>
18 19
19#define VERIFY_READ 0 20#define VERIFY_READ 0
@@ -36,18 +37,20 @@
36 37
37#define get_ds() (KERNEL_DS) 38#define get_ds() (KERNEL_DS)
38#define get_fs() (current->thread.mm_segment) 39#define get_fs() (current->thread.mm_segment)
39
40#define set_fs(x) \
41do { \
42 unsigned long __pto; \
43 current->thread.mm_segment = (x); \
44 __pto = current->thread.mm_segment.ar4 ? \
45 S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
46 __ctl_load(__pto, 7, 7); \
47} while (0)
48
49#define segment_eq(a,b) ((a).ar4 == (b).ar4) 40#define segment_eq(a,b) ((a).ar4 == (b).ar4)
50 41
42static inline void set_fs(mm_segment_t fs)
43{
44 current->thread.mm_segment = fs;
45 if (segment_eq(fs, KERNEL_DS)) {
46 set_cpu_flag(CIF_ASCE_SECONDARY);
47 __ctl_load(S390_lowcore.kernel_asce, 7, 7);
48 } else {
49 clear_cpu_flag(CIF_ASCE_SECONDARY);
50 __ctl_load(S390_lowcore.user_asce, 7, 7);
51 }
52}
53
51static inline int __range_ok(unsigned long addr, unsigned long size) 54static inline int __range_ok(unsigned long addr, unsigned long size)
52{ 55{
53 return 1; 56 return 1;
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index bf736e764cb4..6848ba5c1454 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -24,6 +24,7 @@ header-y += mman.h
24header-y += monwriter.h 24header-y += monwriter.h
25header-y += msgbuf.h 25header-y += msgbuf.h
26header-y += param.h 26header-y += param.h
27header-y += pkey.h
27header-y += poll.h 28header-y += poll.h
28header-y += posix_types.h 29header-y += posix_types.h
29header-y += ptrace.h 30header-y += ptrace.h
diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h
new file mode 100644
index 000000000000..ed7f19c27ce5
--- /dev/null
+++ b/arch/s390/include/uapi/asm/pkey.h
@@ -0,0 +1,112 @@
1/*
2 * Userspace interface to the pkey device driver
3 *
4 * Copyright IBM Corp. 2017
5 *
6 * Author: Harald Freudenberger <freude@de.ibm.com>
7 *
8 */
9
10#ifndef _UAPI_PKEY_H
11#define _UAPI_PKEY_H
12
13#include <linux/ioctl.h>
14#include <linux/types.h>
15
16/*
17 * Ioctl calls supported by the pkey device driver
18 */
19
20#define PKEY_IOCTL_MAGIC 'p'
21
22#define SECKEYBLOBSIZE 64 /* secure key blob size is always 64 bytes */
23#define MAXPROTKEYSIZE 64 /* a protected key blob may be up to 64 bytes */
24#define MAXCLRKEYSIZE 32 /* a clear key value may be up to 32 bytes */
25
26/* defines for the type field within the pkey_protkey struct */
27#define PKEY_KEYTYPE_AES_128 1
28#define PKEY_KEYTYPE_AES_192 2
29#define PKEY_KEYTYPE_AES_256 3
30
31/* Struct to hold a secure key blob */
32struct pkey_seckey {
33 __u8 seckey[SECKEYBLOBSIZE]; /* the secure key blob */
34};
35
36/* Struct to hold protected key and length info */
37struct pkey_protkey {
38 __u32 type; /* key type, one of the PKEY_KEYTYPE values */
39 __u32 len; /* bytes actually stored in protkey[] */
40 __u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */
41};
42
43/* Struct to hold a clear key value */
44struct pkey_clrkey {
45 __u8 clrkey[MAXCLRKEYSIZE]; /* 16, 24, or 32 byte clear key value */
46};
47
48/*
49 * Generate secure key
50 */
51struct pkey_genseck {
52 __u16 cardnr; /* in: card to use or FFFF for any */
53 __u16 domain; /* in: domain or FFFF for any */
54 __u32 keytype; /* in: key type to generate */
55 struct pkey_seckey seckey; /* out: the secure key blob */
56};
57#define PKEY_GENSECK _IOWR(PKEY_IOCTL_MAGIC, 0x01, struct pkey_genseck)
58
59/*
60 * Construct secure key from clear key value
61 */
62struct pkey_clr2seck {
63 __u16 cardnr; /* in: card to use or FFFF for any */
64 __u16 domain; /* in: domain or FFFF for any */
65 __u32 keytype; /* in: key type to generate */
66 struct pkey_clrkey clrkey; /* in: the clear key value */
67 struct pkey_seckey seckey; /* out: the secure key blob */
68};
69#define PKEY_CLR2SECK _IOWR(PKEY_IOCTL_MAGIC, 0x02, struct pkey_clr2seck)
70
71/*
72 * Fabricate protected key from a secure key
73 */
74struct pkey_sec2protk {
75 __u16 cardnr; /* in: card to use or FFFF for any */
76 __u16 domain; /* in: domain or FFFF for any */
77 struct pkey_seckey seckey; /* in: the secure key blob */
78 struct pkey_protkey protkey; /* out: the protected key */
79};
80#define PKEY_SEC2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x03, struct pkey_sec2protk)
81
82/*
 83 * Fabricate protected key from a clear key value
84 */
85struct pkey_clr2protk {
86 __u32 keytype; /* in: key type to generate */
87 struct pkey_clrkey clrkey; /* in: the clear key value */
88 struct pkey_protkey protkey; /* out: the protected key */
89};
90#define PKEY_CLR2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x04, struct pkey_clr2protk)
91
92/*
93 * Search for matching crypto card based on the Master Key
94 * Verification Pattern provided inside a secure key.
95 */
96struct pkey_findcard {
97 struct pkey_seckey seckey; /* in: the secure key blob */
98 __u16 cardnr; /* out: card number */
99 __u16 domain; /* out: domain number */
100};
101#define PKEY_FINDCARD _IOWR(PKEY_IOCTL_MAGIC, 0x05, struct pkey_findcard)
102
103/*
104 * Combined together: findcard + sec2prot
105 */
106struct pkey_skey2pkey {
107 struct pkey_seckey seckey; /* in: the secure key blob */
108 struct pkey_protkey protkey; /* out: the protected key */
109};
110#define PKEY_SKEY2PKEY _IOWR(PKEY_IOCTL_MAGIC, 0x06, struct pkey_skey2pkey)
111
112#endif /* _UAPI_PKEY_H */
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index db469fa11462..dff2152350a7 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -50,7 +50,8 @@ _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
50 _TIF_UPROBE) 50 _TIF_UPROBE)
51_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ 51_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
52 _TIF_SYSCALL_TRACEPOINT) 52 _TIF_SYSCALL_TRACEPOINT)
53_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE | _CIF_FPU) 53_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
54 _CIF_ASCE_SECONDARY | _CIF_FPU)
54_PIF_WORK = (_PIF_PER_TRAP) 55_PIF_WORK = (_PIF_PER_TRAP)
55 56
56#define BASED(name) name-cleanup_critical(%r13) 57#define BASED(name) name-cleanup_critical(%r13)
@@ -339,8 +340,8 @@ ENTRY(system_call)
339 jo .Lsysc_notify_resume 340 jo .Lsysc_notify_resume
340 TSTMSK __LC_CPU_FLAGS,_CIF_FPU 341 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
341 jo .Lsysc_vxrs 342 jo .Lsysc_vxrs
342 TSTMSK __LC_CPU_FLAGS,_CIF_ASCE 343 TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
343 jo .Lsysc_uaccess 344 jnz .Lsysc_asce
344 j .Lsysc_return # beware of critical section cleanup 345 j .Lsysc_return # beware of critical section cleanup
345 346
346# 347#
@@ -358,12 +359,15 @@ ENTRY(system_call)
358 jg s390_handle_mcck # TIF bit will be cleared by handler 359 jg s390_handle_mcck # TIF bit will be cleared by handler
359 360
360# 361#
361# _CIF_ASCE is set, load user space asce 362# _CIF_ASCE_PRIMARY and/or CIF_ASCE_SECONDARY set, load user space asce
362# 363#
363.Lsysc_uaccess: 364.Lsysc_asce:
364 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE 365 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
365 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 366 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
366 j .Lsysc_return 367 TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_SECONDARY
368 jz .Lsysc_return
369 larl %r14,.Lsysc_return
370 jg set_fs_fixup
367 371
368# 372#
369# CIF_FPU is set, restore floating-point controls and floating-point registers. 373# CIF_FPU is set, restore floating-point controls and floating-point registers.
@@ -661,8 +665,8 @@ ENTRY(io_int_handler)
661 jo .Lio_notify_resume 665 jo .Lio_notify_resume
662 TSTMSK __LC_CPU_FLAGS,_CIF_FPU 666 TSTMSK __LC_CPU_FLAGS,_CIF_FPU
663 jo .Lio_vxrs 667 jo .Lio_vxrs
664 TSTMSK __LC_CPU_FLAGS,_CIF_ASCE 668 TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
665 jo .Lio_uaccess 669 jnz .Lio_asce
666 j .Lio_return # beware of critical section cleanup 670 j .Lio_return # beware of critical section cleanup
667 671
668# 672#
@@ -675,12 +679,15 @@ ENTRY(io_int_handler)
675 j .Lio_return 679 j .Lio_return
676 680
677# 681#
678# _CIF_ASCE is set, load user space asce 682# _CIF_ASCE_PRIMARY and/or CIF_ASCE_SECONDARY set, load user space asce
679# 683#
680.Lio_uaccess: 684.Lio_asce:
681 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE 685 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE_PRIMARY
682 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 686 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
683 j .Lio_return 687 TSTMSK __LC_CPU_FLAGS,_CIF_ASCE_SECONDARY
688 jz .Lio_return
689 larl %r14,.Lio_return
690 jg set_fs_fixup
684 691
685# 692#
686# CIF_FPU is set, restore floating-point controls and floating-point registers. 693# CIF_FPU is set, restore floating-point controls and floating-point registers.
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index e79f030dd276..33f901865326 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -80,5 +80,6 @@ long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
80DECLARE_PER_CPU(u64, mt_cycles[8]); 80DECLARE_PER_CPU(u64, mt_cycles[8]);
81 81
82void verify_facilities(void); 82void verify_facilities(void);
83void set_fs_fixup(void);
83 84
84#endif /* _ENTRY_H */ 85#endif /* _ENTRY_H */
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 56e14d073167..80c093e0c6f1 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -116,6 +116,19 @@ static int notrace s390_validate_registers(union mci mci, int umode)
116 s390_handle_damage(); 116 s390_handle_damage();
117 kill_task = 1; 117 kill_task = 1;
118 } 118 }
119 /* Validate control registers */
120 if (!mci.cr) {
121 /*
122 * Control registers have unknown contents.
123 * Can't recover and therefore stopping machine.
124 */
125 s390_handle_damage();
126 } else {
127 asm volatile(
128 " lctlg 0,15,0(%0)\n"
129 " ptlb\n"
130 : : "a" (&S390_lowcore.cregs_save_area) : "memory");
131 }
119 if (!mci.fp) { 132 if (!mci.fp) {
120 /* 133 /*
121 * Floating point registers can't be restored. If the 134 * Floating point registers can't be restored. If the
@@ -208,18 +221,6 @@ static int notrace s390_validate_registers(union mci mci, int umode)
208 */ 221 */
209 kill_task = 1; 222 kill_task = 1;
210 } 223 }
211 /* Validate control registers */
212 if (!mci.cr) {
213 /*
214 * Control registers have unknown contents.
215 * Can't recover and therefore stopping machine.
216 */
217 s390_handle_damage();
218 } else {
219 asm volatile(
220 " lctlg 0,15,0(%0)"
221 : : "a" (&S390_lowcore.cregs_save_area) : "memory");
222 }
223 /* 224 /*
224 * We don't even try to validate the TOD register, since we simply 225 * We don't even try to validate the TOD register, since we simply
225 * can't write something sensible into that register. 226 * can't write something sensible into that register.
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index c5b86b4a1a8b..54281660582c 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -100,8 +100,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
100 return 0; 100 return 0;
101} 101}
102 102
103int copy_thread(unsigned long clone_flags, unsigned long new_stackp, 103int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
104 unsigned long arg, struct task_struct *p) 104 unsigned long arg, struct task_struct *p, unsigned long tls)
105{ 105{
106 struct fake_frame 106 struct fake_frame
107 { 107 {
@@ -156,7 +156,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
156 156
157 /* Set a new TLS ? */ 157 /* Set a new TLS ? */
158 if (clone_flags & CLONE_SETTLS) { 158 if (clone_flags & CLONE_SETTLS) {
159 unsigned long tls = frame->childregs.gprs[6];
160 if (is_compat_task()) { 159 if (is_compat_task()) {
161 p->thread.acrs[0] = (unsigned int)tls; 160 p->thread.acrs[0] = (unsigned int)tls;
162 } else { 161 } else {
@@ -234,3 +233,16 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
234 ret = PAGE_ALIGN(mm->brk + brk_rnd()); 233 ret = PAGE_ALIGN(mm->brk + brk_rnd());
235 return (ret > mm->brk) ? ret : mm->brk; 234 return (ret > mm->brk) ? ret : mm->brk;
236} 235}
236
237void set_fs_fixup(void)
238{
239 struct pt_regs *regs = current_pt_regs();
240 static bool warned;
241
242 set_fs(USER_DS);
243 if (warned)
244 return;
245 WARN(1, "Unbalanced set_fs - int code: 0x%x\n", regs->int_code);
246 show_registers(regs);
247 warned = true;
248}
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 59ac93714fa4..a07b1ec1391d 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -359,8 +359,8 @@ static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
359 spin_lock(&gmap->guest_table_lock); 359 spin_lock(&gmap->guest_table_lock);
360 entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT); 360 entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
361 if (entry) { 361 if (entry) {
362 flush = (*entry != _SEGMENT_ENTRY_INVALID); 362 flush = (*entry != _SEGMENT_ENTRY_EMPTY);
363 *entry = _SEGMENT_ENTRY_INVALID; 363 *entry = _SEGMENT_ENTRY_EMPTY;
364 } 364 }
365 spin_unlock(&gmap->guest_table_lock); 365 spin_unlock(&gmap->guest_table_lock);
366 return flush; 366 return flush;
@@ -589,7 +589,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
589 return rc; 589 return rc;
590 ptl = pmd_lock(mm, pmd); 590 ptl = pmd_lock(mm, pmd);
591 spin_lock(&gmap->guest_table_lock); 591 spin_lock(&gmap->guest_table_lock);
592 if (*table == _SEGMENT_ENTRY_INVALID) { 592 if (*table == _SEGMENT_ENTRY_EMPTY) {
593 rc = radix_tree_insert(&gmap->host_to_guest, 593 rc = radix_tree_insert(&gmap->host_to_guest,
594 vmaddr >> PMD_SHIFT, table); 594 vmaddr >> PMD_SHIFT, table);
595 if (!rc) 595 if (!rc)
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index a03816227719..9b4050caa4e9 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -62,7 +62,7 @@ static inline unsigned long __pte_to_rste(pte_t pte)
62 rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC, 62 rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC,
63 _SEGMENT_ENTRY_NOEXEC); 63 _SEGMENT_ENTRY_NOEXEC);
64 } else 64 } else
65 rste = _SEGMENT_ENTRY_INVALID; 65 rste = _SEGMENT_ENTRY_EMPTY;
66 return rste; 66 return rste;
67} 67}
68 68
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 2cac445b02fd..0b49dbc423e2 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -62,19 +62,32 @@ config CRYPTO_DEV_GEODE
62 will be called geode-aes. 62 will be called geode-aes.
63 63
64config ZCRYPT 64config ZCRYPT
65 tristate "Support for PCI-attached cryptographic adapters" 65 tristate "Support for s390 cryptographic adapters"
66 depends on S390 66 depends on S390
67 select HW_RANDOM 67 select HW_RANDOM
68 help 68 help
69 Select this option if you want to use a PCI-attached cryptographic 69 Select this option if you want to enable support for
70 adapter like: 70 s390 cryptographic adapters like:
71 + PCI Cryptographic Accelerator (PCICA)
72 + PCI Cryptographic Coprocessor (PCICC)
73 + PCI-X Cryptographic Coprocessor (PCIXCC) 71 + PCI-X Cryptographic Coprocessor (PCIXCC)
74 + Crypto Express2 Coprocessor (CEX2C) 72 + Crypto Express 2,3,4 or 5 Coprocessor (CEXxC)
75 + Crypto Express2 Accelerator (CEX2A) 73 + Crypto Express 2,3,4 or 5 Accelerator (CEXxA)
76 + Crypto Express3 Coprocessor (CEX3C) 74 + Crypto Express 4 or 5 EP11 Coprocessor (CEXxP)
77 + Crypto Express3 Accelerator (CEX3A) 75
76config PKEY
77 tristate "Kernel API for protected key handling"
78 depends on S390
79 depends on ZCRYPT
80 help
81 With this option enabled the pkey kernel module provides an API
82 for creation and handling of protected keys. Other parts of the
83 kernel or userspace applications may use these functions.
84
85 Select this option if you want to enable the kernel and userspace
 86 API for protected key handling.
87
88 Please note that creation of protected keys from secure keys
89 requires to have at least one CEX card in coprocessor mode
90 available at runtime.
78 91
79config CRYPTO_SHA1_S390 92config CRYPTO_SHA1_S390
80 tristate "SHA1 digest algorithm" 93 tristate "SHA1 digest algorithm"
@@ -124,6 +137,7 @@ config CRYPTO_AES_S390
124 depends on S390 137 depends on S390
125 select CRYPTO_ALGAPI 138 select CRYPTO_ALGAPI
126 select CRYPTO_BLKCIPHER 139 select CRYPTO_BLKCIPHER
140 select PKEY
127 help 141 help
128 This is the s390 hardware accelerated implementation of the 142 This is the s390 hardware accelerated implementation of the
129 AES cipher algorithms (FIPS-197). 143 AES cipher algorithms (FIPS-197).
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 0f1713727d4c..0b38217f8147 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -4864,7 +4864,7 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
4864 break; 4864 break;
4865 case 3: /* tsa_intrg */ 4865 case 3: /* tsa_intrg */
4866 len += sprintf(page + len, PRINTK_HEADER 4866 len += sprintf(page + len, PRINTK_HEADER
4867 " tsb->tsa.intrg.: not supportet yet\n"); 4867 " tsb->tsa.intrg.: not supported yet\n");
4868 break; 4868 break;
4869 } 4869 }
4870 4870
diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
index 8225da619014..4182f60124da 100644
--- a/drivers/s390/cio/ioasm.c
+++ b/drivers/s390/cio/ioasm.c
@@ -165,13 +165,15 @@ int tpi(struct tpi_info *addr)
165int chsc(void *chsc_area) 165int chsc(void *chsc_area)
166{ 166{
167 typedef struct { char _[4096]; } addr_type; 167 typedef struct { char _[4096]; } addr_type;
168 int cc; 168 int cc = -EIO;
169 169
170 asm volatile( 170 asm volatile(
171 " .insn rre,0xb25f0000,%2,0\n" 171 " .insn rre,0xb25f0000,%2,0\n"
172 " ipm %0\n" 172 "0: ipm %0\n"
173 " srl %0,28\n" 173 " srl %0,28\n"
174 : "=d" (cc), "=m" (*(addr_type *) chsc_area) 174 "1:\n"
175 EX_TABLE(0b, 1b)
176 : "+d" (cc), "=m" (*(addr_type *) chsc_area)
175 : "d" (chsc_area), "m" (*(addr_type *) chsc_area) 177 : "d" (chsc_area), "m" (*(addr_type *) chsc_area)
176 : "cc"); 178 : "cc");
177 trace_s390_cio_chsc(chsc_area, cc); 179 trace_s390_cio_chsc(chsc_area, cc);
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 0a7fb83f35e5..be36f1010d75 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -10,3 +10,7 @@ zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
10obj-$(CONFIG_ZCRYPT) += zcrypt.o 10obj-$(CONFIG_ZCRYPT) += zcrypt.o
11# adapter drivers depend on ap.o and zcrypt.o 11# adapter drivers depend on ap.o and zcrypt.o
12obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o 12obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
13
14# pkey kernel module
15pkey-objs := pkey_api.o
16obj-$(CONFIG_PKEY) += pkey.o
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 56db76c05775..9be4596d8a08 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1107,16 +1107,6 @@ static void ap_config_timeout(unsigned long ptr)
1107 queue_work(system_long_wq, &ap_scan_work); 1107 queue_work(system_long_wq, &ap_scan_work);
1108} 1108}
1109 1109
1110static void ap_reset_domain(void)
1111{
1112 int i;
1113
1114 if (ap_domain_index == -1 || !ap_test_config_domain(ap_domain_index))
1115 return;
1116 for (i = 0; i < AP_DEVICES; i++)
1117 ap_rapq(AP_MKQID(i, ap_domain_index));
1118}
1119
1120static void ap_reset_all(void) 1110static void ap_reset_all(void)
1121{ 1111{
1122 int i, j; 1112 int i, j;
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index 1cd9128593e4..cfa161ccc74e 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -58,9 +58,9 @@ static ssize_t ap_functions_show(struct device *dev,
58 58
59static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL); 59static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
60 60
61static ssize_t ap_request_count_show(struct device *dev, 61static ssize_t ap_req_count_show(struct device *dev,
62 struct device_attribute *attr, 62 struct device_attribute *attr,
63 char *buf) 63 char *buf)
64{ 64{
65 struct ap_card *ac = to_ap_card(dev); 65 struct ap_card *ac = to_ap_card(dev);
66 unsigned int req_cnt; 66 unsigned int req_cnt;
@@ -72,7 +72,23 @@ static ssize_t ap_request_count_show(struct device *dev,
72 return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt); 72 return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
73} 73}
74 74
75static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL); 75static ssize_t ap_req_count_store(struct device *dev,
76 struct device_attribute *attr,
77 const char *buf, size_t count)
78{
79 struct ap_card *ac = to_ap_card(dev);
80 struct ap_queue *aq;
81
82 spin_lock_bh(&ap_list_lock);
83 for_each_ap_queue(aq, ac)
84 aq->total_request_count = 0;
85 spin_unlock_bh(&ap_list_lock);
86 atomic_set(&ac->total_request_count, 0);
87
88 return count;
89}
90
91static DEVICE_ATTR(request_count, 0644, ap_req_count_show, ap_req_count_store);
76 92
77static ssize_t ap_requestq_count_show(struct device *dev, 93static ssize_t ap_requestq_count_show(struct device *dev,
78 struct device_attribute *attr, char *buf) 94 struct device_attribute *attr, char *buf)
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 7be67fa9f224..480c58a63769 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -459,9 +459,9 @@ EXPORT_SYMBOL(ap_queue_resume);
459/* 459/*
460 * AP queue related attributes. 460 * AP queue related attributes.
461 */ 461 */
462static ssize_t ap_request_count_show(struct device *dev, 462static ssize_t ap_req_count_show(struct device *dev,
463 struct device_attribute *attr, 463 struct device_attribute *attr,
464 char *buf) 464 char *buf)
465{ 465{
466 struct ap_queue *aq = to_ap_queue(dev); 466 struct ap_queue *aq = to_ap_queue(dev);
467 unsigned int req_cnt; 467 unsigned int req_cnt;
@@ -472,7 +472,20 @@ static ssize_t ap_request_count_show(struct device *dev,
472 return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt); 472 return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
473} 473}
474 474
475static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL); 475static ssize_t ap_req_count_store(struct device *dev,
476 struct device_attribute *attr,
477 const char *buf, size_t count)
478{
479 struct ap_queue *aq = to_ap_queue(dev);
480
481 spin_lock_bh(&aq->lock);
482 aq->total_request_count = 0;
483 spin_unlock_bh(&aq->lock);
484
485 return count;
486}
487
488static DEVICE_ATTR(request_count, 0644, ap_req_count_show, ap_req_count_store);
476 489
477static ssize_t ap_requestq_count_show(struct device *dev, 490static ssize_t ap_requestq_count_show(struct device *dev,
478 struct device_attribute *attr, char *buf) 491 struct device_attribute *attr, char *buf)
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
new file mode 100644
index 000000000000..40f1136f5568
--- /dev/null
+++ b/drivers/s390/crypto/pkey_api.c
@@ -0,0 +1,1148 @@
1/*
2 * pkey device driver
3 *
4 * Copyright IBM Corp. 2017
5 * Author(s): Harald Freudenberger
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License (version 2 only)
9 * as published by the Free Software Foundation.
10 *
11 */
12
13#define KMSG_COMPONENT "pkey"
14#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
15
16#include <linux/fs.h>
17#include <linux/init.h>
18#include <linux/miscdevice.h>
19#include <linux/module.h>
20#include <linux/slab.h>
21#include <linux/kallsyms.h>
22#include <linux/debugfs.h>
23#include <asm/zcrypt.h>
24#include <asm/cpacf.h>
25#include <asm/pkey.h>
26
27#include "zcrypt_api.h"
28
29MODULE_LICENSE("GPL");
30MODULE_AUTHOR("IBM Corporation");
31MODULE_DESCRIPTION("s390 protected key interface");
32
33/* Size of parameter block used for all cca requests/replies */
34#define PARMBSIZE 512
35
36/* Size of vardata block used for some of the cca requests/replies */
37#define VARDATASIZE 4096
38
39/*
40 * debug feature data and functions
41 */
42
43static debug_info_t *debug_info;
44
45#define DEBUG_DBG(...) debug_sprintf_event(debug_info, 6, ##__VA_ARGS__)
46#define DEBUG_INFO(...) debug_sprintf_event(debug_info, 5, ##__VA_ARGS__)
47#define DEBUG_WARN(...) debug_sprintf_event(debug_info, 4, ##__VA_ARGS__)
48#define DEBUG_ERR(...) debug_sprintf_event(debug_info, 3, ##__VA_ARGS__)
49
50static void __init pkey_debug_init(void)
51{
52 debug_info = debug_register("pkey", 1, 1, 4 * sizeof(long));
53 debug_register_view(debug_info, &debug_sprintf_view);
54 debug_set_level(debug_info, 3);
55}
56
57static void __exit pkey_debug_exit(void)
58{
59 debug_unregister(debug_info);
60}
61
62/* inside view of a secure key token (only type 0x01 version 0x04) */
63struct secaeskeytoken {
64 u8 type; /* 0x01 for internal key token */
65 u8 res0[3];
66 u8 version; /* should be 0x04 */
67 u8 res1[1];
68 u8 flag; /* key flags */
69 u8 res2[1];
70 u64 mkvp; /* master key verification pattern */
71 u8 key[32]; /* key value (encrypted) */
72 u8 cv[8]; /* control vector */
73 u16 bitsize; /* key bit size */
74 u16 keysize; /* key byte size */
75 u8 tvv[4]; /* token validation value */
76} __packed;
77
78/*
79 * Simple check if the token is a valid CCA secure AES key
80 * token. If keybitsize is given, the bitsize of the key is
81 * also checked. Returns 0 on success or errno value on failure.
82 */
83static int check_secaeskeytoken(u8 *token, int keybitsize)
84{
85 struct secaeskeytoken *t = (struct secaeskeytoken *) token;
86
87 if (t->type != 0x01) {
88 DEBUG_ERR(
89 "check_secaeskeytoken secure token check failed, type mismatch 0x%02x != 0x01\n",
90 (int) t->type);
91 return -EINVAL;
92 }
93 if (t->version != 0x04) {
94 DEBUG_ERR(
95 "check_secaeskeytoken secure token check failed, version mismatch 0x%02x != 0x04\n",
96 (int) t->version);
97 return -EINVAL;
98 }
99 if (keybitsize > 0 && t->bitsize != keybitsize) {
100 DEBUG_ERR(
101 "check_secaeskeytoken secure token check failed, bitsize mismatch %d != %d\n",
102 (int) t->bitsize, keybitsize);
103 return -EINVAL;
104 }
105
106 return 0;
107}
108
109/*
110 * Allocate consecutive memory for request CPRB, request param
111 * block, reply CPRB and reply param block and fill in values
112 * for the common fields. Returns 0 on success or errno value
113 * on failure.
114 */
115static int alloc_and_prep_cprbmem(size_t paramblen,
116 u8 **pcprbmem,
117 struct CPRBX **preqCPRB,
118 struct CPRBX **prepCPRB)
119{
120 u8 *cprbmem;
121 size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen;
122 struct CPRBX *preqcblk, *prepcblk;
123
124 /*
125 * allocate consecutive memory for request CPRB, request param
126 * block, reply CPRB and reply param block
127 */
128 cprbmem = kmalloc(2 * cprbplusparamblen, GFP_KERNEL);
129 if (!cprbmem)
130 return -ENOMEM;
131 memset(cprbmem, 0, 2 * cprbplusparamblen);
132
133 preqcblk = (struct CPRBX *) cprbmem;
134 prepcblk = (struct CPRBX *) (cprbmem + cprbplusparamblen);
135
136 /* fill request cprb struct */
137 preqcblk->cprb_len = sizeof(struct CPRBX);
138 preqcblk->cprb_ver_id = 0x02;
139 memcpy(preqcblk->func_id, "T2", 2);
140 preqcblk->rpl_msgbl = cprbplusparamblen;
141 if (paramblen) {
142 preqcblk->req_parmb =
143 ((u8 *) preqcblk) + sizeof(struct CPRBX);
144 preqcblk->rpl_parmb =
145 ((u8 *) prepcblk) + sizeof(struct CPRBX);
146 }
147
148 *pcprbmem = cprbmem;
149 *preqCPRB = preqcblk;
150 *prepCPRB = prepcblk;
151
152 return 0;
153}
154
155/*
156 * Free the cprb memory allocated with the function above.
157 * If the scrub value is not zero, the memory is filled
158 * with zeros before freeing (useful if there was some
159 * clear key material in there).
160 */
161static void free_cprbmem(void *mem, size_t paramblen, int scrub)
162{
163 if (scrub)
164 memzero_explicit(mem, 2 * (sizeof(struct CPRBX) + paramblen));
165 kfree(mem);
166}
167
168/*
169 * Helper function to prepare the xcrb struct
170 */
171static inline void prep_xcrb(struct ica_xcRB *pxcrb,
172 u16 cardnr,
173 struct CPRBX *preqcblk,
174 struct CPRBX *prepcblk)
175{
176 memset(pxcrb, 0, sizeof(*pxcrb));
177 pxcrb->agent_ID = 0x4341; /* 'CA' */
178 pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr);
179 pxcrb->request_control_blk_length =
180 preqcblk->cprb_len + preqcblk->req_parml;
181 pxcrb->request_control_blk_addr = (void *) preqcblk;
182 pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl;
183 pxcrb->reply_control_blk_addr = (void *) prepcblk;
184}
185
186/*
187 * Helper function which calls zcrypt_send_cprb with
188 * memory management segment adjusted to kernel space
189 * so that the copy_from_user called within this
190 * function do in fact copy from kernel space.
191 */
192static inline int _zcrypt_send_cprb(struct ica_xcRB *xcrb)
193{
194 int rc;
195 mm_segment_t old_fs = get_fs();
196
197 set_fs(KERNEL_DS);
198 rc = zcrypt_send_cprb(xcrb);
199 set_fs(old_fs);
200
201 return rc;
202}
203
204/*
205 * Generate (random) AES secure key.
206 */
207int pkey_genseckey(u16 cardnr, u16 domain,
208 u32 keytype, struct pkey_seckey *seckey)
209{
210 int i, rc, keysize;
211 int seckeysize;
212 u8 *mem;
213 struct CPRBX *preqcblk, *prepcblk;
214 struct ica_xcRB xcrb;
215 struct kgreqparm {
216 u8 subfunc_code[2];
217 u16 rule_array_len;
218 struct lv1 {
219 u16 len;
220 char key_form[8];
221 char key_length[8];
222 char key_type1[8];
223 char key_type2[8];
224 } lv1;
225 struct lv2 {
226 u16 len;
227 struct keyid {
228 u16 len;
229 u16 attr;
230 u8 data[SECKEYBLOBSIZE];
231 } keyid[6];
232 } lv2;
233 } *preqparm;
234 struct kgrepparm {
235 u8 subfunc_code[2];
236 u16 rule_array_len;
237 struct lv3 {
238 u16 len;
239 u16 keyblocklen;
240 struct {
241 u16 toklen;
242 u16 tokattr;
243 u8 tok[0];
244 /* ... some more data ... */
245 } keyblock;
246 } lv3;
247 } *prepparm;
248
249 /* get already prepared memory for 2 cprbs with param block each */
250 rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
251 if (rc)
252 return rc;
253
254 /* fill request cprb struct */
255 preqcblk->domain = domain;
256
257 /* fill request cprb param block with KG request */
258 preqparm = (struct kgreqparm *) preqcblk->req_parmb;
259 memcpy(preqparm->subfunc_code, "KG", 2);
260 preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
261 preqparm->lv1.len = sizeof(struct lv1);
262 memcpy(preqparm->lv1.key_form, "OP ", 8);
263 switch (keytype) {
264 case PKEY_KEYTYPE_AES_128:
265 keysize = 16;
266 memcpy(preqparm->lv1.key_length, "KEYLN16 ", 8);
267 break;
268 case PKEY_KEYTYPE_AES_192:
269 keysize = 24;
270 memcpy(preqparm->lv1.key_length, "KEYLN24 ", 8);
271 break;
272 case PKEY_KEYTYPE_AES_256:
273 keysize = 32;
274 memcpy(preqparm->lv1.key_length, "KEYLN32 ", 8);
275 break;
276 default:
277 DEBUG_ERR(
278 "pkey_genseckey unknown/unsupported keytype %d\n",
279 keytype);
280 rc = -EINVAL;
281 goto out;
282 }
283 memcpy(preqparm->lv1.key_type1, "AESDATA ", 8);
284 preqparm->lv2.len = sizeof(struct lv2);
285 for (i = 0; i < 6; i++) {
286 preqparm->lv2.keyid[i].len = sizeof(struct keyid);
287 preqparm->lv2.keyid[i].attr = (i == 2 ? 0x30 : 0x10);
288 }
289 preqcblk->req_parml = sizeof(struct kgreqparm);
290
291 /* fill xcrb struct */
292 prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
293
294 /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
295 rc = _zcrypt_send_cprb(&xcrb);
296 if (rc) {
297 DEBUG_ERR(
298 "pkey_genseckey zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
299 (int) cardnr, (int) domain, rc);
300 goto out;
301 }
302
303 /* check response returncode and reasoncode */
304 if (prepcblk->ccp_rtcode != 0) {
305 DEBUG_ERR(
306 "pkey_genseckey secure key generate failure, card response %d/%d\n",
307 (int) prepcblk->ccp_rtcode,
308 (int) prepcblk->ccp_rscode);
309 rc = -EIO;
310 goto out;
311 }
312
313 /* process response cprb param block */
314 prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
315 prepparm = (struct kgrepparm *) prepcblk->rpl_parmb;
316
317 /* check length of the returned secure key token */
318 seckeysize = prepparm->lv3.keyblock.toklen
319 - sizeof(prepparm->lv3.keyblock.toklen)
320 - sizeof(prepparm->lv3.keyblock.tokattr);
321 if (seckeysize != SECKEYBLOBSIZE) {
322 DEBUG_ERR(
323 "pkey_genseckey secure token size mismatch %d != %d bytes\n",
324 seckeysize, SECKEYBLOBSIZE);
325 rc = -EIO;
326 goto out;
327 }
328
329 /* check secure key token */
330 rc = check_secaeskeytoken(prepparm->lv3.keyblock.tok, 8*keysize);
331 if (rc) {
332 rc = -EIO;
333 goto out;
334 }
335
336 /* copy the generated secure key token */
337 memcpy(seckey->seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
338
339out:
340 free_cprbmem(mem, PARMBSIZE, 0);
341 return rc;
342}
343EXPORT_SYMBOL(pkey_genseckey);
344
345/*
346 * Generate an AES secure key with given key value.
347 */
348int pkey_clr2seckey(u16 cardnr, u16 domain, u32 keytype,
349 const struct pkey_clrkey *clrkey,
350 struct pkey_seckey *seckey)
351{
352 int rc, keysize, seckeysize;
353 u8 *mem;
354 struct CPRBX *preqcblk, *prepcblk;
355 struct ica_xcRB xcrb;
356 struct cmreqparm {
357 u8 subfunc_code[2];
358 u16 rule_array_len;
359 char rule_array[8];
360 struct lv1 {
361 u16 len;
362 u8 clrkey[0];
363 } lv1;
364 struct lv2 {
365 u16 len;
366 struct keyid {
367 u16 len;
368 u16 attr;
369 u8 data[SECKEYBLOBSIZE];
370 } keyid;
371 } lv2;
372 } *preqparm;
373 struct lv2 *plv2;
374 struct cmrepparm {
375 u8 subfunc_code[2];
376 u16 rule_array_len;
377 struct lv3 {
378 u16 len;
379 u16 keyblocklen;
380 struct {
381 u16 toklen;
382 u16 tokattr;
383 u8 tok[0];
384 /* ... some more data ... */
385 } keyblock;
386 } lv3;
387 } *prepparm;
388
389 /* get already prepared memory for 2 cprbs with param block each */
390 rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
391 if (rc)
392 return rc;
393
394 /* fill request cprb struct */
395 preqcblk->domain = domain;
396
397 /* fill request cprb param block with CM request */
398 preqparm = (struct cmreqparm *) preqcblk->req_parmb;
399 memcpy(preqparm->subfunc_code, "CM", 2);
400 memcpy(preqparm->rule_array, "AES ", 8);
401 preqparm->rule_array_len =
402 sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
403 switch (keytype) {
404 case PKEY_KEYTYPE_AES_128:
405 keysize = 16;
406 break;
407 case PKEY_KEYTYPE_AES_192:
408 keysize = 24;
409 break;
410 case PKEY_KEYTYPE_AES_256:
411 keysize = 32;
412 break;
413 default:
414 DEBUG_ERR(
415 "pkey_clr2seckey unknown/unsupported keytype %d\n",
416 keytype);
417 rc = -EINVAL;
418 goto out;
419 }
420 preqparm->lv1.len = sizeof(struct lv1) + keysize;
421 memcpy(preqparm->lv1.clrkey, clrkey->clrkey, keysize);
422 plv2 = (struct lv2 *) (((u8 *) &preqparm->lv2) + keysize);
423 plv2->len = sizeof(struct lv2);
424 plv2->keyid.len = sizeof(struct keyid);
425 plv2->keyid.attr = 0x30;
426 preqcblk->req_parml = sizeof(struct cmreqparm) + keysize;
427
428 /* fill xcrb struct */
429 prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
430
431 /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
432 rc = _zcrypt_send_cprb(&xcrb);
433 if (rc) {
434 DEBUG_ERR(
435 "pkey_clr2seckey zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
436 (int) cardnr, (int) domain, rc);
437 goto out;
438 }
439
440 /* check response returncode and reasoncode */
441 if (prepcblk->ccp_rtcode != 0) {
442 DEBUG_ERR(
443 "pkey_clr2seckey clear key import failure, card response %d/%d\n",
444 (int) prepcblk->ccp_rtcode,
445 (int) prepcblk->ccp_rscode);
446 rc = -EIO;
447 goto out;
448 }
449
450 /* process response cprb param block */
451 prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
452 prepparm = (struct cmrepparm *) prepcblk->rpl_parmb;
453
454 /* check length of the returned secure key token */
455 seckeysize = prepparm->lv3.keyblock.toklen
456 - sizeof(prepparm->lv3.keyblock.toklen)
457 - sizeof(prepparm->lv3.keyblock.tokattr);
458 if (seckeysize != SECKEYBLOBSIZE) {
459 DEBUG_ERR(
460 "pkey_clr2seckey secure token size mismatch %d != %d bytes\n",
461 seckeysize, SECKEYBLOBSIZE);
462 rc = -EIO;
463 goto out;
464 }
465
466 /* check secure key token */
467 rc = check_secaeskeytoken(prepparm->lv3.keyblock.tok, 8*keysize);
468 if (rc) {
469 rc = -EIO;
470 goto out;
471 }
472
473 /* copy the generated secure key token */
474 memcpy(seckey->seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
475
476out:
477 free_cprbmem(mem, PARMBSIZE, 1);
478 return rc;
479}
480EXPORT_SYMBOL(pkey_clr2seckey);
481
482/*
483 * Derive a protected key from the secure key blob.
484 */
485int pkey_sec2protkey(u16 cardnr, u16 domain,
486 const struct pkey_seckey *seckey,
487 struct pkey_protkey *protkey)
488{
489 int rc;
490 u8 *mem;
491 struct CPRBX *preqcblk, *prepcblk;
492 struct ica_xcRB xcrb;
493 struct uskreqparm {
494 u8 subfunc_code[2];
495 u16 rule_array_len;
496 struct lv1 {
497 u16 len;
498 u16 attr_len;
499 u16 attr_flags;
500 } lv1;
501 struct lv2 {
502 u16 len;
503 u16 attr_len;
504 u16 attr_flags;
505 u8 token[0]; /* cca secure key token */
506 } lv2 __packed;
507 } *preqparm;
508 struct uskrepparm {
509 u8 subfunc_code[2];
510 u16 rule_array_len;
511 struct lv3 {
512 u16 len;
513 u16 attr_len;
514 u16 attr_flags;
515 struct cpacfkeyblock {
516 u8 version; /* version of this struct */
517 u8 flags[2];
518 u8 algo;
519 u8 form;
520 u8 pad1[3];
521 u16 keylen;
522 u8 key[64]; /* the key (keylen bytes) */
523 u16 keyattrlen;
524 u8 keyattr[32];
525 u8 pad2[1];
526 u8 vptype;
527 u8 vp[32]; /* verification pattern */
528 } keyblock;
529 } lv3 __packed;
530 } *prepparm;
531
532 /* get already prepared memory for 2 cprbs with param block each */
533 rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
534 if (rc)
535 return rc;
536
537 /* fill request cprb struct */
538 preqcblk->domain = domain;
539
540 /* fill request cprb param block with USK request */
541 preqparm = (struct uskreqparm *) preqcblk->req_parmb;
542 memcpy(preqparm->subfunc_code, "US", 2);
543 preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
544 preqparm->lv1.len = sizeof(struct lv1);
545 preqparm->lv1.attr_len = sizeof(struct lv1) - sizeof(preqparm->lv1.len);
546 preqparm->lv1.attr_flags = 0x0001;
547 preqparm->lv2.len = sizeof(struct lv2) + SECKEYBLOBSIZE;
548 preqparm->lv2.attr_len = sizeof(struct lv2)
549 - sizeof(preqparm->lv2.len) + SECKEYBLOBSIZE;
550 preqparm->lv2.attr_flags = 0x0000;
551 memcpy(preqparm->lv2.token, seckey->seckey, SECKEYBLOBSIZE);
552 preqcblk->req_parml = sizeof(struct uskreqparm) + SECKEYBLOBSIZE;
553
554 /* fill xcrb struct */
555 prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
556
557 /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
558 rc = _zcrypt_send_cprb(&xcrb);
559 if (rc) {
560 DEBUG_ERR(
561 "pkey_sec2protkey zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
562 (int) cardnr, (int) domain, rc);
563 goto out;
564 }
565
566 /* check response returncode and reasoncode */
567 if (prepcblk->ccp_rtcode != 0) {
568 DEBUG_ERR(
569 "pkey_sec2protkey unwrap secure key failure, card response %d/%d\n",
570 (int) prepcblk->ccp_rtcode,
571 (int) prepcblk->ccp_rscode);
572 rc = -EIO;
573 goto out;
574 }
575
576 /* process response cprb param block */
577 prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
578 prepparm = (struct uskrepparm *) prepcblk->rpl_parmb;
579
580 /* check the returned keyblock */
581 if (prepparm->lv3.keyblock.version != 0x01) {
582 DEBUG_ERR(
583 "pkey_sec2protkey reply param keyblock version mismatch 0x%02x != 0x01\n",
584 (int) prepparm->lv3.keyblock.version);
585 rc = -EIO;
586 goto out;
587 }
588
589 /* copy the translated protected key */
590 switch (prepparm->lv3.keyblock.keylen) {
591 case 16+32:
592 protkey->type = PKEY_KEYTYPE_AES_128;
593 break;
594 case 24+32:
595 protkey->type = PKEY_KEYTYPE_AES_192;
596 break;
597 case 32+32:
598 protkey->type = PKEY_KEYTYPE_AES_256;
599 break;
600 default:
601 DEBUG_ERR("pkey_sec2protkey unknown/unsupported keytype %d\n",
602 prepparm->lv3.keyblock.keylen);
603 rc = -EIO;
604 goto out;
605 }
606 protkey->len = prepparm->lv3.keyblock.keylen;
607 memcpy(protkey->protkey, prepparm->lv3.keyblock.key, protkey->len);
608
609out:
610 free_cprbmem(mem, PARMBSIZE, 0);
611 return rc;
612}
613EXPORT_SYMBOL(pkey_sec2protkey);
614
615/*
616 * Create a protected key from a clear key value.
617 */
618int pkey_clr2protkey(u32 keytype,
619 const struct pkey_clrkey *clrkey,
620 struct pkey_protkey *protkey)
621{
622 long fc;
623 int keysize;
624 u8 paramblock[64];
625
626 switch (keytype) {
627 case PKEY_KEYTYPE_AES_128:
628 keysize = 16;
629 fc = CPACF_PCKMO_ENC_AES_128_KEY;
630 break;
631 case PKEY_KEYTYPE_AES_192:
632 keysize = 24;
633 fc = CPACF_PCKMO_ENC_AES_192_KEY;
634 break;
635 case PKEY_KEYTYPE_AES_256:
636 keysize = 32;
637 fc = CPACF_PCKMO_ENC_AES_256_KEY;
638 break;
639 default:
640 DEBUG_ERR("pkey_clr2protkey unknown/unsupported keytype %d\n",
641 keytype);
642 return -EINVAL;
643 }
644
645 /* prepare param block */
646 memset(paramblock, 0, sizeof(paramblock));
647 memcpy(paramblock, clrkey->clrkey, keysize);
648
649 /* call the pckmo instruction */
650 cpacf_pckmo(fc, paramblock);
651
652 /* copy created protected key */
653 protkey->type = keytype;
654 protkey->len = keysize + 32;
655 memcpy(protkey->protkey, paramblock, keysize + 32);
656
657 return 0;
658}
659EXPORT_SYMBOL(pkey_clr2protkey);
660
661/*
662 * query cryptographic facility from adapter
663 */
664static int query_crypto_facility(u16 cardnr, u16 domain,
665 const char *keyword,
666 u8 *rarray, size_t *rarraylen,
667 u8 *varray, size_t *varraylen)
668{
669 int rc;
670 u16 len;
671 u8 *mem, *ptr;
672 struct CPRBX *preqcblk, *prepcblk;
673 struct ica_xcRB xcrb;
674 struct fqreqparm {
675 u8 subfunc_code[2];
676 u16 rule_array_len;
677 char rule_array[8];
678 struct lv1 {
679 u16 len;
680 u8 data[VARDATASIZE];
681 } lv1;
682 u16 dummylen;
683 } *preqparm;
684 size_t parmbsize = sizeof(struct fqreqparm);
685 struct fqrepparm {
686 u8 subfunc_code[2];
687 u8 lvdata[0];
688 } *prepparm;
689
690 /* get already prepared memory for 2 cprbs with param block each */
691 rc = alloc_and_prep_cprbmem(parmbsize, &mem, &preqcblk, &prepcblk);
692 if (rc)
693 return rc;
694
695 /* fill request cprb struct */
696 preqcblk->domain = domain;
697
698 /* fill request cprb param block with FQ request */
699 preqparm = (struct fqreqparm *) preqcblk->req_parmb;
700 memcpy(preqparm->subfunc_code, "FQ", 2);
701 strncpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
702 preqparm->rule_array_len =
703 sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
704 preqparm->lv1.len = sizeof(preqparm->lv1);
705 preqparm->dummylen = sizeof(preqparm->dummylen);
706 preqcblk->req_parml = parmbsize;
707
708 /* fill xcrb struct */
709 prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
710
711 /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
712 rc = _zcrypt_send_cprb(&xcrb);
713 if (rc) {
714 DEBUG_ERR(
715 "query_crypto_facility zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
716 (int) cardnr, (int) domain, rc);
717 goto out;
718 }
719
720 /* check response returncode and reasoncode */
721 if (prepcblk->ccp_rtcode != 0) {
722 DEBUG_ERR(
723 "query_crypto_facility unwrap secure key failure, card response %d/%d\n",
724 (int) prepcblk->ccp_rtcode,
725 (int) prepcblk->ccp_rscode);
726 rc = -EIO;
727 goto out;
728 }
729
730 /* process response cprb param block */
731 prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
732 prepparm = (struct fqrepparm *) prepcblk->rpl_parmb;
733 ptr = prepparm->lvdata;
734
735 /* check and possibly copy reply rule array */
736 len = *((u16 *) ptr);
737 if (len > sizeof(u16)) {
738 ptr += sizeof(u16);
739 len -= sizeof(u16);
740 if (rarray && rarraylen && *rarraylen > 0) {
741 *rarraylen = (len > *rarraylen ? *rarraylen : len);
742 memcpy(rarray, ptr, *rarraylen);
743 }
744 ptr += len;
745 }
746 /* check and possibly copy reply var array */
747 len = *((u16 *) ptr);
748 if (len > sizeof(u16)) {
749 ptr += sizeof(u16);
750 len -= sizeof(u16);
751 if (varray && varraylen && *varraylen > 0) {
752 *varraylen = (len > *varraylen ? *varraylen : len);
753 memcpy(varray, ptr, *varraylen);
754 }
755 ptr += len;
756 }
757
758out:
759 free_cprbmem(mem, parmbsize, 0);
760 return rc;
761}
762
763/*
764 * Fetch just the mkvp value via query_crypto_facility from adapter.
765 */
766static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp)
767{
768 int rc, found = 0;
769 size_t rlen, vlen;
770 u8 *rarray, *varray, *pg;
771
772 pg = (u8 *) __get_free_page(GFP_KERNEL);
773 if (!pg)
774 return -ENOMEM;
775 rarray = pg;
776 varray = pg + PAGE_SIZE/2;
777 rlen = vlen = PAGE_SIZE/2;
778
779 rc = query_crypto_facility(cardnr, domain, "STATICSA",
780 rarray, &rlen, varray, &vlen);
781 if (rc == 0 && rlen > 8*8 && vlen > 184+8) {
782 if (rarray[64] == '2') {
783 /* current master key state is valid */
784 *mkvp = *((u64 *)(varray + 184));
785 found = 1;
786 }
787 }
788
789 free_page((unsigned long) pg);
790
791 return found ? 0 : -ENOENT;
792}
793
794/* struct to hold cached mkvp info for each card/domain */
795struct mkvp_info {
796 struct list_head list;
797 u16 cardnr;
798 u16 domain;
799 u64 mkvp;
800};
801
802/* a list with mkvp_info entries */
803static LIST_HEAD(mkvp_list);
804static DEFINE_SPINLOCK(mkvp_list_lock);
805
806static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp)
807{
808 int rc = -ENOENT;
809 struct mkvp_info *ptr;
810
811 spin_lock_bh(&mkvp_list_lock);
812 list_for_each_entry(ptr, &mkvp_list, list) {
813 if (ptr->cardnr == cardnr &&
814 ptr->domain == domain) {
815 *mkvp = ptr->mkvp;
816 rc = 0;
817 break;
818 }
819 }
820 spin_unlock_bh(&mkvp_list_lock);
821
822 return rc;
823}
824
825static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp)
826{
827 int found = 0;
828 struct mkvp_info *ptr;
829
830 spin_lock_bh(&mkvp_list_lock);
831 list_for_each_entry(ptr, &mkvp_list, list) {
832 if (ptr->cardnr == cardnr &&
833 ptr->domain == domain) {
834 ptr->mkvp = mkvp;
835 found = 1;
836 break;
837 }
838 }
839 if (!found) {
840 ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC);
841 if (!ptr) {
842 spin_unlock_bh(&mkvp_list_lock);
843 return;
844 }
845 ptr->cardnr = cardnr;
846 ptr->domain = domain;
847 ptr->mkvp = mkvp;
848 list_add(&ptr->list, &mkvp_list);
849 }
850 spin_unlock_bh(&mkvp_list_lock);
851}
852
853static void mkvp_cache_scrub(u16 cardnr, u16 domain)
854{
855 struct mkvp_info *ptr;
856
857 spin_lock_bh(&mkvp_list_lock);
858 list_for_each_entry(ptr, &mkvp_list, list) {
859 if (ptr->cardnr == cardnr &&
860 ptr->domain == domain) {
861 list_del(&ptr->list);
862 kfree(ptr);
863 break;
864 }
865 }
866 spin_unlock_bh(&mkvp_list_lock);
867}
868
869static void __exit mkvp_cache_free(void)
870{
871 struct mkvp_info *ptr, *pnext;
872
873 spin_lock_bh(&mkvp_list_lock);
874 list_for_each_entry_safe(ptr, pnext, &mkvp_list, list) {
875 list_del(&ptr->list);
876 kfree(ptr);
877 }
878 spin_unlock_bh(&mkvp_list_lock);
879}
880
881/*
882 * Search for a matching crypto card based on the Master Key
883 * Verification Pattern provided inside a secure key.
884 */
885int pkey_findcard(const struct pkey_seckey *seckey,
886 u16 *pcardnr, u16 *pdomain, int verify)
887{
888 struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
889 struct zcrypt_device_matrix *device_matrix;
890 u16 card, dom;
891 u64 mkvp;
892 int i, rc;
893
894 /* mkvp must not be zero */
895 if (t->mkvp == 0)
896 return -EINVAL;
897
898 /* fetch status of all crypto cards */
899 device_matrix = kmalloc(sizeof(struct zcrypt_device_matrix),
900 GFP_KERNEL);
901 if (!device_matrix)
902 return -ENOMEM;
903 zcrypt_device_status_mask(device_matrix);
904
905 /* walk through all crypto cards */
906 for (i = 0; i < MAX_ZDEV_ENTRIES; i++) {
907 card = AP_QID_CARD(device_matrix->device[i].qid);
908 dom = AP_QID_QUEUE(device_matrix->device[i].qid);
909 if (device_matrix->device[i].online &&
910 device_matrix->device[i].functions & 0x04) {
911 /* an enabled CCA Coprocessor card */
912 /* try cached mkvp */
913 if (mkvp_cache_fetch(card, dom, &mkvp) == 0 &&
914 t->mkvp == mkvp) {
915 if (!verify)
916 break;
917 /* verify: fetch mkvp from adapter */
918 if (fetch_mkvp(card, dom, &mkvp) == 0) {
919 mkvp_cache_update(card, dom, mkvp);
920 if (t->mkvp == mkvp)
921 break;
922 }
923 }
924 } else {
925 /* Card is offline and/or not a CCA card. */
926 /* del mkvp entry from cache if it exists */
927 mkvp_cache_scrub(card, dom);
928 }
929 }
930 if (i >= MAX_ZDEV_ENTRIES) {
931 /* nothing found, so this time without cache */
932 for (i = 0; i < MAX_ZDEV_ENTRIES; i++) {
933 if (!(device_matrix->device[i].online &&
934 device_matrix->device[i].functions & 0x04))
935 continue;
936 card = AP_QID_CARD(device_matrix->device[i].qid);
937 dom = AP_QID_QUEUE(device_matrix->device[i].qid);
938 /* fresh fetch mkvp from adapter */
939 if (fetch_mkvp(card, dom, &mkvp) == 0) {
940 mkvp_cache_update(card, dom, mkvp);
941 if (t->mkvp == mkvp)
942 break;
943 }
944 }
945 }
946 if (i < MAX_ZDEV_ENTRIES) {
947 if (pcardnr)
948 *pcardnr = card;
949 if (pdomain)
950 *pdomain = dom;
951 rc = 0;
952 } else
953 rc = -ENODEV;
954
955 kfree(device_matrix);
956 return rc;
957}
958EXPORT_SYMBOL(pkey_findcard);
959
960/*
961 * Find card and transform secure key into protected key.
962 */
963int pkey_skey2pkey(const struct pkey_seckey *seckey,
964 struct pkey_protkey *protkey)
965{
966 u16 cardnr, domain;
967 int rc, verify;
968
969 /*
970 * The pkey_sec2protkey call may fail when a card has been
971 * addressed where the master key was changed after last fetch
972 * of the mkvp into the cache. So first try without verify then
973 * with verify enabled (thus refreshing the mkvp for each card).
974 */
975 for (verify = 0; verify < 2; verify++) {
976 rc = pkey_findcard(seckey, &cardnr, &domain, verify);
977 if (rc)
978 continue;
979 rc = pkey_sec2protkey(cardnr, domain, seckey, protkey);
980 if (rc == 0)
981 break;
982 }
983
984 if (rc)
985 DEBUG_DBG("pkey_skey2pkey failed rc=%d\n", rc);
986
987 return rc;
988}
989EXPORT_SYMBOL(pkey_skey2pkey);
990
991/*
992 * File io functions
993 */
994
995static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
996 unsigned long arg)
997{
998 int rc;
999
1000 switch (cmd) {
1001 case PKEY_GENSECK: {
1002 struct pkey_genseck __user *ugs = (void __user *) arg;
1003 struct pkey_genseck kgs;
1004
1005 if (copy_from_user(&kgs, ugs, sizeof(kgs)))
1006 return -EFAULT;
1007 rc = pkey_genseckey(kgs.cardnr, kgs.domain,
1008 kgs.keytype, &kgs.seckey);
1009 DEBUG_DBG("pkey_ioctl pkey_genseckey()=%d\n", rc);
1010 if (rc)
1011 break;
1012 if (copy_to_user(ugs, &kgs, sizeof(kgs)))
1013 return -EFAULT;
1014 break;
1015 }
1016 case PKEY_CLR2SECK: {
1017 struct pkey_clr2seck __user *ucs = (void __user *) arg;
1018 struct pkey_clr2seck kcs;
1019
1020 if (copy_from_user(&kcs, ucs, sizeof(kcs)))
1021 return -EFAULT;
1022 rc = pkey_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype,
1023 &kcs.clrkey, &kcs.seckey);
1024 DEBUG_DBG("pkey_ioctl pkey_clr2seckey()=%d\n", rc);
1025 if (rc)
1026 break;
1027 if (copy_to_user(ucs, &kcs, sizeof(kcs)))
1028 return -EFAULT;
1029 memzero_explicit(&kcs, sizeof(kcs));
1030 break;
1031 }
1032 case PKEY_SEC2PROTK: {
1033 struct pkey_sec2protk __user *usp = (void __user *) arg;
1034 struct pkey_sec2protk ksp;
1035
1036 if (copy_from_user(&ksp, usp, sizeof(ksp)))
1037 return -EFAULT;
1038 rc = pkey_sec2protkey(ksp.cardnr, ksp.domain,
1039 &ksp.seckey, &ksp.protkey);
1040 DEBUG_DBG("pkey_ioctl pkey_sec2protkey()=%d\n", rc);
1041 if (rc)
1042 break;
1043 if (copy_to_user(usp, &ksp, sizeof(ksp)))
1044 return -EFAULT;
1045 break;
1046 }
1047 case PKEY_CLR2PROTK: {
1048 struct pkey_clr2protk __user *ucp = (void __user *) arg;
1049 struct pkey_clr2protk kcp;
1050
1051 if (copy_from_user(&kcp, ucp, sizeof(kcp)))
1052 return -EFAULT;
1053 rc = pkey_clr2protkey(kcp.keytype,
1054 &kcp.clrkey, &kcp.protkey);
1055 DEBUG_DBG("pkey_ioctl pkey_clr2protkey()=%d\n", rc);
1056 if (rc)
1057 break;
1058 if (copy_to_user(ucp, &kcp, sizeof(kcp)))
1059 return -EFAULT;
1060 memzero_explicit(&kcp, sizeof(kcp));
1061 break;
1062 }
1063 case PKEY_FINDCARD: {
1064 struct pkey_findcard __user *ufc = (void __user *) arg;
1065 struct pkey_findcard kfc;
1066
1067 if (copy_from_user(&kfc, ufc, sizeof(kfc)))
1068 return -EFAULT;
1069 rc = pkey_findcard(&kfc.seckey,
1070 &kfc.cardnr, &kfc.domain, 1);
1071 DEBUG_DBG("pkey_ioctl pkey_findcard()=%d\n", rc);
1072 if (rc)
1073 break;
1074 if (copy_to_user(ufc, &kfc, sizeof(kfc)))
1075 return -EFAULT;
1076 break;
1077 }
1078 case PKEY_SKEY2PKEY: {
1079 struct pkey_skey2pkey __user *usp = (void __user *) arg;
1080 struct pkey_skey2pkey ksp;
1081
1082 if (copy_from_user(&ksp, usp, sizeof(ksp)))
1083 return -EFAULT;
1084 rc = pkey_skey2pkey(&ksp.seckey, &ksp.protkey);
1085 DEBUG_DBG("pkey_ioctl pkey_skey2pkey()=%d\n", rc);
1086 if (rc)
1087 break;
1088 if (copy_to_user(usp, &ksp, sizeof(ksp)))
1089 return -EFAULT;
1090 break;
1091 }
1092 default:
1093 /* unknown/unsupported ioctl cmd */
1094 return -ENOTTY;
1095 }
1096
1097 return rc;
1098}
1099
1100/*
1101 * Sysfs and file io operations
1102 */
1103static const struct file_operations pkey_fops = {
1104 .owner = THIS_MODULE,
1105 .open = nonseekable_open,
1106 .llseek = no_llseek,
1107 .unlocked_ioctl = pkey_unlocked_ioctl,
1108};
1109
1110static struct miscdevice pkey_dev = {
1111 .name = "pkey",
1112 .minor = MISC_DYNAMIC_MINOR,
1113 .mode = 0666,
1114 .fops = &pkey_fops,
1115};
1116
1117/*
1118 * Module init
1119 */
1120int __init pkey_init(void)
1121{
1122 cpacf_mask_t pckmo_functions;
1123
1124 /* check for pckmo instructions available */
1125 if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
1126 return -EOPNOTSUPP;
1127 if (!cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_128_KEY) ||
1128 !cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_192_KEY) ||
1129 !cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_256_KEY))
1130 return -EOPNOTSUPP;
1131
1132 pkey_debug_init();
1133
1134 return misc_register(&pkey_dev);
1135}
1136
1137/*
1138 * Module exit
1139 */
1140static void __exit pkey_exit(void)
1141{
1142 misc_deregister(&pkey_dev);
1143 mkvp_cache_free();
1144 pkey_debug_exit();
1145}
1146
1147module_init(pkey_init);
1148module_exit(pkey_exit);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 144a17941e6f..93015f85d4a6 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -374,7 +374,7 @@ out:
374 return rc; 374 return rc;
375} 375}
376 376
377static long zcrypt_send_cprb(struct ica_xcRB *xcRB) 377long zcrypt_send_cprb(struct ica_xcRB *xcRB)
378{ 378{
379 struct zcrypt_card *zc, *pref_zc; 379 struct zcrypt_card *zc, *pref_zc;
380 struct zcrypt_queue *zq, *pref_zq; 380 struct zcrypt_queue *zq, *pref_zq;
@@ -444,6 +444,7 @@ out:
444 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 444 AP_QID_CARD(qid), AP_QID_QUEUE(qid));
445 return rc; 445 return rc;
446} 446}
447EXPORT_SYMBOL(zcrypt_send_cprb);
447 448
448static bool is_desired_ep11_card(unsigned int dev_id, 449static bool is_desired_ep11_card(unsigned int dev_id,
449 unsigned short target_num, 450 unsigned short target_num,
@@ -619,7 +620,7 @@ out:
619 return rc; 620 return rc;
620} 621}
621 622
622static void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix) 623void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
623{ 624{
624 struct zcrypt_card *zc; 625 struct zcrypt_card *zc;
625 struct zcrypt_queue *zq; 626 struct zcrypt_queue *zq;
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 274a59051534..6c94efd23eac 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -190,5 +190,7 @@ void zcrypt_msgtype_unregister(struct zcrypt_ops *);
190struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int); 190struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
191int zcrypt_api_init(void); 191int zcrypt_api_init(void);
192void zcrypt_api_exit(void); 192void zcrypt_api_exit(void);
193long zcrypt_send_cprb(struct ica_xcRB *xcRB);
194void zcrypt_device_status_mask(struct zcrypt_device_matrix *devstatus);
193 195
194#endif /* _ZCRYPT_API_H_ */ 196#endif /* _ZCRYPT_API_H_ */