aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorGonglei <arei.gonglei@huawei.com>2016-12-14 21:03:16 -0500
committerMichael S. Tsirkin <mst@redhat.com>2016-12-15 17:13:32 -0500
commitdbaf0624ffa57ae6e7d87a823185ccd9a7852d3c (patch)
treebca0d89ad92a6907449fcddcf0aa4ae60dc5e912
parent809ecb9bca6a9424ccd392d67e368160f8b76c92 (diff)
crypto: add virtio-crypto driver
This patch introduces virtio-crypto driver for Linux Kernel. The virtio crypto device is a virtual cryptography device as well as a kind of virtual hardware accelerator for virtual machines. The encryption and decryption requests are placed in the data queue and are ultimately handled by the backend crypto accelerators. The second queue is the control queue used to create or destroy sessions for symmetric algorithms and will control some advanced features in the future. The virtio crypto device provides the following crypto services: CIPHER, MAC, HASH, and AEAD. For more information about virtio-crypto device, please see: http://qemu-project.org/Features/VirtioCrypto CC: Michael S. Tsirkin <mst@redhat.com> CC: Cornelia Huck <cornelia.huck@de.ibm.com> CC: Stefan Hajnoczi <stefanha@redhat.com> CC: Herbert Xu <herbert@gondor.apana.org.au> CC: Halil Pasic <pasic@linux.vnet.ibm.com> CC: David S. Miller <davem@davemloft.net> CC: Zeng Xin <xin.zeng@intel.com> Signed-off-by: Gonglei <arei.gonglei@huawei.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
-rw-r--r--MAINTAINERS9
-rw-r--r--drivers/crypto/Kconfig2
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/virtio/Kconfig10
-rw-r--r--drivers/crypto/virtio/Makefile5
-rw-r--r--drivers/crypto/virtio/virtio_crypto_algs.c540
-rw-r--r--drivers/crypto/virtio/virtio_crypto_common.h128
-rw-r--r--drivers/crypto/virtio/virtio_crypto_core.c476
-rw-r--r--drivers/crypto/virtio/virtio_crypto_mgr.c264
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/virtio_crypto.h450
-rw-r--r--include/uapi/linux/virtio_ids.h1
12 files changed, 1887 insertions, 0 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 59c9895d73d5..650ad4f6b608 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -12988,6 +12988,7 @@ F: drivers/net/virtio_net.c
12988F: drivers/block/virtio_blk.c 12988F: drivers/block/virtio_blk.c
12989F: include/linux/virtio_*.h 12989F: include/linux/virtio_*.h
12990F: include/uapi/linux/virtio_*.h 12990F: include/uapi/linux/virtio_*.h
12991F: drivers/crypto/virtio/
12991 12992
12992VIRTIO DRIVERS FOR S390 12993VIRTIO DRIVERS FOR S390
12993M: Christian Borntraeger <borntraeger@de.ibm.com> 12994M: Christian Borntraeger <borntraeger@de.ibm.com>
@@ -13024,6 +13025,14 @@ S: Maintained
13024F: drivers/virtio/virtio_input.c 13025F: drivers/virtio/virtio_input.c
13025F: include/uapi/linux/virtio_input.h 13026F: include/uapi/linux/virtio_input.h
13026 13027
13028VIRTIO CRYPTO DRIVER
13029M: Gonglei <arei.gonglei@huawei.com>
13030L: virtualization@lists.linux-foundation.org
13031L: linux-crypto@vger.kernel.org
13032S: Maintained
13033F: drivers/crypto/virtio/
13034F: include/uapi/linux/virtio_crypto.h
13035
13027VIA RHINE NETWORK DRIVER 13036VIA RHINE NETWORK DRIVER
13028S: Orphan 13037S: Orphan
13029F: drivers/net/ethernet/via/via-rhine.c 13038F: drivers/net/ethernet/via/via-rhine.c
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4d2b81f2b223..79564785ae30 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -555,4 +555,6 @@ config CRYPTO_DEV_ROCKCHIP
555 555
556source "drivers/crypto/chelsio/Kconfig" 556source "drivers/crypto/chelsio/Kconfig"
557 557
558source "drivers/crypto/virtio/Kconfig"
559
558endif # CRYPTO_HW 560endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index ad7250fa1348..bc53cb833a06 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -32,3 +32,4 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
32obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/ 32obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
33obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/ 33obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
34obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/ 34obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
35obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
diff --git a/drivers/crypto/virtio/Kconfig b/drivers/crypto/virtio/Kconfig
new file mode 100644
index 000000000000..d80f73366ae2
--- /dev/null
+++ b/drivers/crypto/virtio/Kconfig
@@ -0,0 +1,10 @@
1config CRYPTO_DEV_VIRTIO
2 tristate "VirtIO crypto driver"
3 depends on VIRTIO
4 select CRYPTO_AEAD
5 select CRYPTO_AUTHENC
6 select CRYPTO_BLKCIPHER
7 default m
8 help
9 This driver provides support for virtio crypto device. If you
10 choose 'M' here, this module will be called virtio_crypto.
diff --git a/drivers/crypto/virtio/Makefile b/drivers/crypto/virtio/Makefile
new file mode 100644
index 000000000000..dd342c947ff9
--- /dev/null
+++ b/drivers/crypto/virtio/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio_crypto.o
2virtio_crypto-objs := \
3 virtio_crypto_algs.o \
4 virtio_crypto_mgr.o \
5 virtio_crypto_core.o
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
new file mode 100644
index 000000000000..c2374df9abae
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -0,0 +1,540 @@
1 /* Algorithms supported by virtio crypto device
2 *
3 * Authors: Gonglei <arei.gonglei@huawei.com>
4 *
5 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/scatterlist.h>
22#include <crypto/algapi.h>
23#include <linux/err.h>
24#include <crypto/scatterwalk.h>
25#include <linux/atomic.h>
26
27#include <uapi/linux/virtio_crypto.h>
28#include "virtio_crypto_common.h"
29
/*
 * algs_lock protects virtio_crypto_active_devs and serializes
 * crypto algorithm registration/unregistration across devices.
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int virtio_crypto_active_devs;
36
37static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
38{
39 u64 total = 0;
40
41 for (total = 0; sg; sg = sg_next(sg))
42 total += sg->length;
43
44 return total;
45}
46
47static int
48virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
49{
50 switch (key_len) {
51 case AES_KEYSIZE_128:
52 case AES_KEYSIZE_192:
53 case AES_KEYSIZE_256:
54 *alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
55 break;
56 default:
57 pr_err("virtio_crypto: Unsupported key length: %d\n",
58 key_len);
59 return -EINVAL;
60 }
61 return 0;
62}
63
64static int virtio_crypto_alg_ablkcipher_init_session(
65 struct virtio_crypto_ablkcipher_ctx *ctx,
66 uint32_t alg, const uint8_t *key,
67 unsigned int keylen,
68 int encrypt)
69{
70 struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
71 unsigned int tmp;
72 struct virtio_crypto *vcrypto = ctx->vcrypto;
73 int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
74 int err;
75 unsigned int num_out = 0, num_in = 0;
76
77 /*
78 * Avoid to do DMA from the stack, switch to using
79 * dynamically-allocated for the key
80 */
81 uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC);
82
83 if (!cipher_key)
84 return -ENOMEM;
85
86 memcpy(cipher_key, key, keylen);
87
88 spin_lock(&vcrypto->ctrl_lock);
89 /* Pad ctrl header */
90 vcrypto->ctrl.header.opcode =
91 cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
92 vcrypto->ctrl.header.algo = cpu_to_le32(alg);
93 /* Set the default dataqueue id to 0 */
94 vcrypto->ctrl.header.queue_id = 0;
95
96 vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
97 /* Pad cipher's parameters */
98 vcrypto->ctrl.u.sym_create_session.op_type =
99 cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
100 vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
101 vcrypto->ctrl.header.algo;
102 vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
103 cpu_to_le32(keylen);
104 vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
105 cpu_to_le32(op);
106
107 sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
108 sgs[num_out++] = &outhdr;
109
110 /* Set key */
111 sg_init_one(&key_sg, cipher_key, keylen);
112 sgs[num_out++] = &key_sg;
113
114 /* Return status and session id back */
115 sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
116 sgs[num_out + num_in++] = &inhdr;
117
118 err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
119 num_in, vcrypto, GFP_ATOMIC);
120 if (err < 0) {
121 spin_unlock(&vcrypto->ctrl_lock);
122 kzfree(cipher_key);
123 return err;
124 }
125 virtqueue_kick(vcrypto->ctrl_vq);
126
127 /*
128 * Trapping into the hypervisor, so the request should be
129 * handled immediately.
130 */
131 while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
132 !virtqueue_is_broken(vcrypto->ctrl_vq))
133 cpu_relax();
134
135 if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
136 spin_unlock(&vcrypto->ctrl_lock);
137 pr_err("virtio_crypto: Create session failed status: %u\n",
138 le32_to_cpu(vcrypto->input.status));
139 kzfree(cipher_key);
140 return -EINVAL;
141 }
142
143 if (encrypt)
144 ctx->enc_sess_info.session_id =
145 le64_to_cpu(vcrypto->input.session_id);
146 else
147 ctx->dec_sess_info.session_id =
148 le64_to_cpu(vcrypto->input.session_id);
149
150 spin_unlock(&vcrypto->ctrl_lock);
151
152 kzfree(cipher_key);
153 return 0;
154}
155
156static int virtio_crypto_alg_ablkcipher_close_session(
157 struct virtio_crypto_ablkcipher_ctx *ctx,
158 int encrypt)
159{
160 struct scatterlist outhdr, status_sg, *sgs[2];
161 unsigned int tmp;
162 struct virtio_crypto_destroy_session_req *destroy_session;
163 struct virtio_crypto *vcrypto = ctx->vcrypto;
164 int err;
165 unsigned int num_out = 0, num_in = 0;
166
167 spin_lock(&vcrypto->ctrl_lock);
168 vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
169 /* Pad ctrl header */
170 vcrypto->ctrl.header.opcode =
171 cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
172 /* Set the default virtqueue id to 0 */
173 vcrypto->ctrl.header.queue_id = 0;
174
175 destroy_session = &vcrypto->ctrl.u.destroy_session;
176
177 if (encrypt)
178 destroy_session->session_id =
179 cpu_to_le64(ctx->enc_sess_info.session_id);
180 else
181 destroy_session->session_id =
182 cpu_to_le64(ctx->dec_sess_info.session_id);
183
184 sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
185 sgs[num_out++] = &outhdr;
186
187 /* Return status and session id back */
188 sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
189 sizeof(vcrypto->ctrl_status.status));
190 sgs[num_out + num_in++] = &status_sg;
191
192 err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
193 num_in, vcrypto, GFP_ATOMIC);
194 if (err < 0) {
195 spin_unlock(&vcrypto->ctrl_lock);
196 return err;
197 }
198 virtqueue_kick(vcrypto->ctrl_vq);
199
200 while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
201 !virtqueue_is_broken(vcrypto->ctrl_vq))
202 cpu_relax();
203
204 if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
205 spin_unlock(&vcrypto->ctrl_lock);
206 pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
207 vcrypto->ctrl_status.status,
208 destroy_session->session_id);
209
210 return -EINVAL;
211 }
212 spin_unlock(&vcrypto->ctrl_lock);
213
214 return 0;
215}
216
217static int virtio_crypto_alg_ablkcipher_init_sessions(
218 struct virtio_crypto_ablkcipher_ctx *ctx,
219 const uint8_t *key, unsigned int keylen)
220{
221 uint32_t alg;
222 int ret;
223 struct virtio_crypto *vcrypto = ctx->vcrypto;
224
225 if (keylen > vcrypto->max_cipher_key_len) {
226 pr_err("virtio_crypto: the key is too long\n");
227 goto bad_key;
228 }
229
230 if (virtio_crypto_alg_validate_key(keylen, &alg))
231 goto bad_key;
232
233 /* Create encryption session */
234 ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
235 alg, key, keylen, 1);
236 if (ret)
237 return ret;
238 /* Create decryption session */
239 ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
240 alg, key, keylen, 0);
241 if (ret) {
242 virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
243 return ret;
244 }
245 return 0;
246
247bad_key:
248 crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
249 return -EINVAL;
250}
251
252/* Note: kernel crypto API realization */
253static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
254 const uint8_t *key,
255 unsigned int keylen)
256{
257 struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
258 int ret;
259
260 if (!ctx->vcrypto) {
261 /* New key */
262 int node = virtio_crypto_get_current_node();
263 struct virtio_crypto *vcrypto =
264 virtcrypto_get_dev_node(node);
265 if (!vcrypto) {
266 pr_err("virtio_crypto: Could not find a virtio device in the system");
267 return -ENODEV;
268 }
269
270 ctx->vcrypto = vcrypto;
271 } else {
272 /* Rekeying, we should close the created sessions previously */
273 virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
274 virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
275 }
276
277 ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
278 if (ret) {
279 virtcrypto_dev_put(ctx->vcrypto);
280 ctx->vcrypto = NULL;
281
282 return ret;
283 }
284
285 return 0;
286}
287
288static int
289__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req,
290 struct ablkcipher_request *req,
291 struct data_queue *data_vq,
292 __u8 op)
293{
294 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
295 unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
296 struct virtio_crypto_ablkcipher_ctx *ctx = vc_req->ablkcipher_ctx;
297 struct virtio_crypto *vcrypto = ctx->vcrypto;
298 struct virtio_crypto_op_data_req *req_data;
299 int src_nents, dst_nents;
300 int err;
301 unsigned long flags;
302 struct scatterlist outhdr, iv_sg, status_sg, **sgs;
303 int i;
304 u64 dst_len;
305 unsigned int num_out = 0, num_in = 0;
306 int sg_total;
307 uint8_t *iv;
308
309 src_nents = sg_nents_for_len(req->src, req->nbytes);
310 dst_nents = sg_nents(req->dst);
311
312 pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
313 src_nents, dst_nents);
314
315 /* Why 3? outhdr + iv + inhdr */
316 sg_total = src_nents + dst_nents + 3;
317 sgs = kzalloc_node(sg_total * sizeof(*sgs), GFP_ATOMIC,
318 dev_to_node(&vcrypto->vdev->dev));
319 if (!sgs)
320 return -ENOMEM;
321
322 req_data = kzalloc_node(sizeof(*req_data), GFP_ATOMIC,
323 dev_to_node(&vcrypto->vdev->dev));
324 if (!req_data) {
325 kfree(sgs);
326 return -ENOMEM;
327 }
328
329 vc_req->req_data = req_data;
330 vc_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
331 /* Head of operation */
332 if (op) {
333 req_data->header.session_id =
334 cpu_to_le64(ctx->enc_sess_info.session_id);
335 req_data->header.opcode =
336 cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
337 } else {
338 req_data->header.session_id =
339 cpu_to_le64(ctx->dec_sess_info.session_id);
340 req_data->header.opcode =
341 cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
342 }
343 req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
344 req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
345 req_data->u.sym_req.u.cipher.para.src_data_len =
346 cpu_to_le32(req->nbytes);
347
348 dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
349 if (unlikely(dst_len > U32_MAX)) {
350 pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
351 err = -EINVAL;
352 goto free;
353 }
354
355 pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
356 req->nbytes, dst_len);
357
358 if (unlikely(req->nbytes + dst_len + ivsize +
359 sizeof(vc_req->status) > vcrypto->max_size)) {
360 pr_err("virtio_crypto: The length is too big\n");
361 err = -EINVAL;
362 goto free;
363 }
364
365 req_data->u.sym_req.u.cipher.para.dst_data_len =
366 cpu_to_le32((uint32_t)dst_len);
367
368 /* Outhdr */
369 sg_init_one(&outhdr, req_data, sizeof(*req_data));
370 sgs[num_out++] = &outhdr;
371
372 /* IV */
373
374 /*
375 * Avoid to do DMA from the stack, switch to using
376 * dynamically-allocated for the IV
377 */
378 iv = kzalloc_node(ivsize, GFP_ATOMIC,
379 dev_to_node(&vcrypto->vdev->dev));
380 if (!iv) {
381 err = -ENOMEM;
382 goto free;
383 }
384 memcpy(iv, req->info, ivsize);
385 sg_init_one(&iv_sg, iv, ivsize);
386 sgs[num_out++] = &iv_sg;
387 vc_req->iv = iv;
388
389 /* Source data */
390 for (i = 0; i < src_nents; i++)
391 sgs[num_out++] = &req->src[i];
392
393 /* Destination data */
394 for (i = 0; i < dst_nents; i++)
395 sgs[num_out + num_in++] = &req->dst[i];
396
397 /* Status */
398 sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
399 sgs[num_out + num_in++] = &status_sg;
400
401 vc_req->sgs = sgs;
402
403 spin_lock_irqsave(&data_vq->lock, flags);
404 err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
405 num_in, vc_req, GFP_ATOMIC);
406 virtqueue_kick(data_vq->vq);
407 spin_unlock_irqrestore(&data_vq->lock, flags);
408 if (unlikely(err < 0))
409 goto free_iv;
410
411 return 0;
412
413free_iv:
414 kzfree(iv);
415free:
416 kzfree(req_data);
417 kfree(sgs);
418 return err;
419}
420
421static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
422{
423 struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
424 struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
425 struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
426 struct virtio_crypto *vcrypto = ctx->vcrypto;
427 int ret;
428 /* Use the first data virtqueue as default */
429 struct data_queue *data_vq = &vcrypto->data_vq[0];
430
431 vc_req->ablkcipher_ctx = ctx;
432 vc_req->ablkcipher_req = req;
433 ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq, 1);
434 if (ret < 0) {
435 pr_err("virtio_crypto: Encryption failed!\n");
436 return ret;
437 }
438
439 return -EINPROGRESS;
440}
441
442static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
443{
444 struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
445 struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
446 struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
447 struct virtio_crypto *vcrypto = ctx->vcrypto;
448 int ret;
449 /* Use the first data virtqueue as default */
450 struct data_queue *data_vq = &vcrypto->data_vq[0];
451
452 vc_req->ablkcipher_ctx = ctx;
453 vc_req->ablkcipher_req = req;
454
455 ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq, 0);
456 if (ret < 0) {
457 pr_err("virtio_crypto: Decryption failed!\n");
458 return ret;
459 }
460
461 return -EINPROGRESS;
462}
463
464static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
465{
466 struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
467
468 tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_request);
469 ctx->tfm = tfm;
470
471 return 0;
472}
473
474static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
475{
476 struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
477
478 if (!ctx->vcrypto)
479 return;
480
481 virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
482 virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
483 virtcrypto_dev_put(ctx->vcrypto);
484 ctx->vcrypto = NULL;
485}
486
487static struct crypto_alg virtio_crypto_algs[] = { {
488 .cra_name = "cbc(aes)",
489 .cra_driver_name = "virtio_crypto_aes_cbc",
490 .cra_priority = 501,
491 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
492 .cra_blocksize = AES_BLOCK_SIZE,
493 .cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
494 .cra_alignmask = 0,
495 .cra_module = THIS_MODULE,
496 .cra_type = &crypto_ablkcipher_type,
497 .cra_init = virtio_crypto_ablkcipher_init,
498 .cra_exit = virtio_crypto_ablkcipher_exit,
499 .cra_u = {
500 .ablkcipher = {
501 .setkey = virtio_crypto_ablkcipher_setkey,
502 .decrypt = virtio_crypto_ablkcipher_decrypt,
503 .encrypt = virtio_crypto_ablkcipher_encrypt,
504 .min_keysize = AES_MIN_KEY_SIZE,
505 .max_keysize = AES_MAX_KEY_SIZE,
506 .ivsize = AES_BLOCK_SIZE,
507 },
508 },
509} };
510
511int virtio_crypto_algs_register(void)
512{
513 int ret = 0;
514
515 mutex_lock(&algs_lock);
516 if (++virtio_crypto_active_devs != 1)
517 goto unlock;
518
519 ret = crypto_register_algs(virtio_crypto_algs,
520 ARRAY_SIZE(virtio_crypto_algs));
521 if (ret)
522 virtio_crypto_active_devs--;
523
524unlock:
525 mutex_unlock(&algs_lock);
526 return ret;
527}
528
529void virtio_crypto_algs_unregister(void)
530{
531 mutex_lock(&algs_lock);
532 if (--virtio_crypto_active_devs != 0)
533 goto unlock;
534
535 crypto_unregister_algs(virtio_crypto_algs,
536 ARRAY_SIZE(virtio_crypto_algs));
537
538unlock:
539 mutex_unlock(&algs_lock);
540}
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
new file mode 100644
index 000000000000..3d6566b02876
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -0,0 +1,128 @@
1/* Common header for Virtio crypto device.
2 *
3 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef _VIRTIO_CRYPTO_COMMON_H
20#define _VIRTIO_CRYPTO_COMMON_H
21
22#include <linux/virtio.h>
23#include <linux/crypto.h>
24#include <linux/spinlock.h>
25#include <crypto/aead.h>
26#include <crypto/aes.h>
27#include <crypto/authenc.h>
28
29
30/* Internal representation of a data virtqueue */
31struct data_queue {
32 /* Virtqueue associated with this send _queue */
33 struct virtqueue *vq;
34
35 /* To protect the vq operations for the dataq */
36 spinlock_t lock;
37
38 /* Name of the tx queue: dataq.$index */
39 char name[32];
40};
41
42struct virtio_crypto {
43 struct virtio_device *vdev;
44 struct virtqueue *ctrl_vq;
45 struct data_queue *data_vq;
46
47 /* To protect the vq operations for the controlq */
48 spinlock_t ctrl_lock;
49
50 /* Maximum of data queues supported by the device */
51 u32 max_data_queues;
52
53 /* Number of queue currently used by the driver */
54 u32 curr_queue;
55
56 /* Maximum length of cipher key */
57 u32 max_cipher_key_len;
58 /* Maximum length of authenticated key */
59 u32 max_auth_key_len;
60 /* Maximum size of per request */
61 u64 max_size;
62
63 /* Control VQ buffers: protected by the ctrl_lock */
64 struct virtio_crypto_op_ctrl_req ctrl;
65 struct virtio_crypto_session_input input;
66 struct virtio_crypto_inhdr ctrl_status;
67
68 unsigned long status;
69 atomic_t ref_count;
70 struct list_head list;
71 struct module *owner;
72 uint8_t dev_id;
73
74 /* Does the affinity hint is set for virtqueues? */
75 bool affinity_hint_set;
76};
77
78struct virtio_crypto_sym_session_info {
79 /* Backend session id, which come from the host side */
80 __u64 session_id;
81};
82
83struct virtio_crypto_ablkcipher_ctx {
84 struct virtio_crypto *vcrypto;
85 struct crypto_tfm *tfm;
86
87 struct virtio_crypto_sym_session_info enc_sess_info;
88 struct virtio_crypto_sym_session_info dec_sess_info;
89};
90
91struct virtio_crypto_request {
92 /* Cipher or aead */
93 uint32_t type;
94 uint8_t status;
95 struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
96 struct ablkcipher_request *ablkcipher_req;
97 struct virtio_crypto_op_data_req *req_data;
98 struct scatterlist **sgs;
99 uint8_t *iv;
100};
101
/* Device manager interface (virtio_crypto_mgr.c): global device table */
int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
struct list_head *virtcrypto_devmgr_get_head(void);
void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev);
struct virtio_crypto *virtcrypto_devmgr_get_first(void);
int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
struct virtio_crypto *virtcrypto_get_dev_node(int node);
int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
113
/* Return the package/node id of the CPU we are currently running on. */
static inline int virtio_crypto_get_current_node(void)
{
	int node;

	/* Disable preemption while reading the current CPU's topology */
	node = topology_physical_package_id(get_cpu());
	put_cpu();

	return node;
}
124
125int virtio_crypto_algs_register(void);
126void virtio_crypto_algs_unregister(void);
127
128#endif /* _VIRTIO_CRYPTO_COMMON_H */
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
new file mode 100644
index 000000000000..fe70ec823b27
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -0,0 +1,476 @@
1 /* Driver for Virtio crypto device.
2 *
3 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/err.h>
20#include <linux/module.h>
21#include <linux/virtio_config.h>
22#include <linux/cpu.h>
23
24#include <uapi/linux/virtio_crypto.h>
25#include "virtio_crypto_common.h"
26
27
28static void
29virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
30{
31 if (vc_req) {
32 kzfree(vc_req->iv);
33 kzfree(vc_req->req_data);
34 kfree(vc_req->sgs);
35 }
36}
37
38static void virtcrypto_dataq_callback(struct virtqueue *vq)
39{
40 struct virtio_crypto *vcrypto = vq->vdev->priv;
41 struct virtio_crypto_request *vc_req;
42 unsigned long flags;
43 unsigned int len;
44 struct ablkcipher_request *ablk_req;
45 int error;
46 unsigned int qid = vq->index;
47
48 spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
49 do {
50 virtqueue_disable_cb(vq);
51 while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
52 if (vc_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
53 switch (vc_req->status) {
54 case VIRTIO_CRYPTO_OK:
55 error = 0;
56 break;
57 case VIRTIO_CRYPTO_INVSESS:
58 case VIRTIO_CRYPTO_ERR:
59 error = -EINVAL;
60 break;
61 case VIRTIO_CRYPTO_BADMSG:
62 error = -EBADMSG;
63 break;
64 default:
65 error = -EIO;
66 break;
67 }
68 ablk_req = vc_req->ablkcipher_req;
69 virtcrypto_clear_request(vc_req);
70
71 spin_unlock_irqrestore(
72 &vcrypto->data_vq[qid].lock, flags);
73 /* Finish the encrypt or decrypt process */
74 ablk_req->base.complete(&ablk_req->base, error);
75 spin_lock_irqsave(
76 &vcrypto->data_vq[qid].lock, flags);
77 }
78 }
79 } while (!virtqueue_enable_cb(vq));
80 spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
81}
82
83static int virtcrypto_find_vqs(struct virtio_crypto *vi)
84{
85 vq_callback_t **callbacks;
86 struct virtqueue **vqs;
87 int ret = -ENOMEM;
88 int i, total_vqs;
89 const char **names;
90
91 /*
92 * We expect 1 data virtqueue, followed by
93 * possible N-1 data queues used in multiqueue mode,
94 * followed by control vq.
95 */
96 total_vqs = vi->max_data_queues + 1;
97
98 /* Allocate space for find_vqs parameters */
99 vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
100 if (!vqs)
101 goto err_vq;
102 callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
103 if (!callbacks)
104 goto err_callback;
105 names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
106 if (!names)
107 goto err_names;
108
109 /* Parameters for control virtqueue */
110 callbacks[total_vqs - 1] = NULL;
111 names[total_vqs - 1] = "controlq";
112
113 /* Allocate/initialize parameters for data virtqueues */
114 for (i = 0; i < vi->max_data_queues; i++) {
115 callbacks[i] = virtcrypto_dataq_callback;
116 snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
117 "dataq.%d", i);
118 names[i] = vi->data_vq[i].name;
119 }
120
121 ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
122 names);
123 if (ret)
124 goto err_find;
125
126 vi->ctrl_vq = vqs[total_vqs - 1];
127
128 for (i = 0; i < vi->max_data_queues; i++) {
129 spin_lock_init(&vi->data_vq[i].lock);
130 vi->data_vq[i].vq = vqs[i];
131 }
132
133 kfree(names);
134 kfree(callbacks);
135 kfree(vqs);
136
137 return 0;
138
139err_find:
140 kfree(names);
141err_names:
142 kfree(callbacks);
143err_callback:
144 kfree(vqs);
145err_vq:
146 return ret;
147}
148
149static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
150{
151 vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
152 GFP_KERNEL);
153 if (!vi->data_vq)
154 return -ENOMEM;
155
156 return 0;
157}
158
159static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
160{
161 int i;
162
163 if (vi->affinity_hint_set) {
164 for (i = 0; i < vi->max_data_queues; i++)
165 virtqueue_set_affinity(vi->data_vq[i].vq, -1);
166
167 vi->affinity_hint_set = false;
168 }
169}
170
171static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
172{
173 int i = 0;
174 int cpu;
175
176 /*
177 * In single queue mode, we don't set the cpu affinity.
178 */
179 if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
180 virtcrypto_clean_affinity(vcrypto, -1);
181 return;
182 }
183
184 /*
185 * In multiqueue mode, we let the queue to be private to one cpu
186 * by setting the affinity hint to eliminate the contention.
187 *
188 * TODO: adds cpu hotplug support by register cpu notifier.
189 *
190 */
191 for_each_online_cpu(cpu) {
192 virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpu);
193 if (++i >= vcrypto->max_data_queues)
194 break;
195 }
196
197 vcrypto->affinity_hint_set = true;
198}
199
200static void virtcrypto_free_queues(struct virtio_crypto *vi)
201{
202 kfree(vi->data_vq);
203}
204
/* Allocate queue bookkeeping, find the virtqueues, set CPU affinity. */
static int virtcrypto_init_vqs(struct virtio_crypto *vi)
{
	int ret;

	ret = virtcrypto_alloc_queues(vi);
	if (ret)
		return ret;

	ret = virtcrypto_find_vqs(vi);
	if (ret) {
		virtcrypto_free_queues(vi);
		return ret;
	}

	/* Exclude CPU hotplug while the affinity hints are applied */
	get_online_cpus();
	virtcrypto_set_affinity(vi);
	put_online_cpus();

	return 0;
}
229
/*
 * Re-read the device's status config field and bring the driver in sync:
 * start the services (register crypto algs) when the accelerator becomes
 * ready, stop them when it goes away.
 *
 * Return: 0 on success or when the status is unchanged, -EPERM when the
 * device reports unknown status bits (device marked broken) or fails to
 * start.
 */
static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
{
	u32 status;
	int err;

	virtio_cread(vcrypto->vdev,
		     struct virtio_crypto_config, status, &status);

	/*
	 * Unknown status bits would be a host error and the driver
	 * should consider the device to be broken.
	 */
	if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
		dev_warn(&vcrypto->vdev->dev,
			 "Unknown status bits: 0x%x\n", status);

		virtio_break_device(vcrypto->vdev);
		return -EPERM;
	}

	/* No transition — nothing to do. */
	if (vcrypto->status == status)
		return 0;

	vcrypto->status = status;

	if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
		err = virtcrypto_dev_start(vcrypto);
		if (err) {
			dev_err(&vcrypto->vdev->dev,
				"Failed to start virtio crypto device.\n");

			return -EPERM;
		}
		dev_info(&vcrypto->vdev->dev, "Accelerator is ready\n");
	} else {
		virtcrypto_dev_stop(vcrypto);
		dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
	}

	return 0;
}
271
/*
 * Tear down all virtqueues: drop CPU affinity hints, delete the vqs and
 * free the per-queue state. All visible callers reset the device first
 * so no request is in flight.
 */
static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
{
	struct virtio_device *vdev = vcrypto->vdev;

	virtcrypto_clean_affinity(vcrypto, -1);

	vdev->config->del_vqs(vdev);

	virtcrypto_free_queues(vcrypto);
}
282
283static int virtcrypto_probe(struct virtio_device *vdev)
284{
285 int err = -EFAULT;
286 struct virtio_crypto *vcrypto;
287 u32 max_data_queues = 0, max_cipher_key_len = 0;
288 u32 max_auth_key_len = 0;
289 u64 max_size = 0;
290
291 if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
292 return -ENODEV;
293
294 if (!vdev->config->get) {
295 dev_err(&vdev->dev, "%s failure: config access disabled\n",
296 __func__);
297 return -EINVAL;
298 }
299
300 if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
301 /*
302 * If the accelerator is connected to a node with no memory
303 * there is no point in using the accelerator since the remote
304 * memory transaction will be very slow.
305 */
306 dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
307 return -EINVAL;
308 }
309
310 vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
311 dev_to_node(&vdev->dev));
312 if (!vcrypto)
313 return -ENOMEM;
314
315 virtio_cread(vdev, struct virtio_crypto_config,
316 max_dataqueues, &max_data_queues);
317 if (max_data_queues < 1)
318 max_data_queues = 1;
319
320 virtio_cread(vdev, struct virtio_crypto_config,
321 max_cipher_key_len, &max_cipher_key_len);
322 virtio_cread(vdev, struct virtio_crypto_config,
323 max_auth_key_len, &max_auth_key_len);
324 virtio_cread(vdev, struct virtio_crypto_config,
325 max_size, &max_size);
326
327 /* Add virtio crypto device to global table */
328 err = virtcrypto_devmgr_add_dev(vcrypto);
329 if (err) {
330 dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
331 goto free;
332 }
333 vcrypto->owner = THIS_MODULE;
334 vcrypto = vdev->priv = vcrypto;
335 vcrypto->vdev = vdev;
336
337 spin_lock_init(&vcrypto->ctrl_lock);
338
339 /* Use single data queue as default */
340 vcrypto->curr_queue = 1;
341 vcrypto->max_data_queues = max_data_queues;
342 vcrypto->max_cipher_key_len = max_cipher_key_len;
343 vcrypto->max_auth_key_len = max_auth_key_len;
344 vcrypto->max_size = max_size;
345
346 dev_info(&vdev->dev,
347 "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
348 vcrypto->max_data_queues,
349 vcrypto->max_cipher_key_len,
350 vcrypto->max_auth_key_len,
351 vcrypto->max_size);
352
353 err = virtcrypto_init_vqs(vcrypto);
354 if (err) {
355 dev_err(&vdev->dev, "Failed to initialize vqs.\n");
356 goto free_dev;
357 }
358 virtio_device_ready(vdev);
359
360 err = virtcrypto_update_status(vcrypto);
361 if (err)
362 goto free_vqs;
363
364 return 0;
365
366free_vqs:
367 vcrypto->vdev->config->reset(vdev);
368 virtcrypto_del_vqs(vcrypto);
369free_dev:
370 virtcrypto_devmgr_rm_dev(vcrypto);
371free:
372 kfree(vcrypto);
373 return err;
374}
375
/*
 * Reclaim requests still queued on the data virtqueues: detach every
 * unused buffer and free the request's backing allocations. Callers
 * (remove/freeze) reset the device first so no request is in flight.
 */
static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
{
	struct virtio_crypto_request *vc_req;
	int i;
	struct virtqueue *vq;

	for (i = 0; i < vcrypto->max_data_queues; i++) {
		vq = vcrypto->data_vq[i].vq;
		while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
			kfree(vc_req->req_data);
			kfree(vc_req->sgs);
		}
	}
}
390
/*
 * Device removal: stop the registered services, quiesce the device,
 * reclaim unused request buffers, delete the virtqueues and drop the
 * device from the global registry.
 */
static void virtcrypto_remove(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	dev_info(&vdev->dev, "Start virtcrypto_remove.\n");

	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);
	vdev->config->reset(vdev);
	virtcrypto_free_unused_reqs(vcrypto);
	virtcrypto_del_vqs(vcrypto);
	virtcrypto_devmgr_rm_dev(vcrypto);
	kfree(vcrypto);
}
405
/* Config-change interrupt handler: re-evaluate the device status field. */
static void virtcrypto_config_changed(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	virtcrypto_update_status(vcrypto);
}
412
#ifdef CONFIG_PM_SLEEP
/*
 * Suspend callback: quiesce the device, reclaim unused request buffers,
 * stop the registered services and tear down the virtqueues so that
 * virtcrypto_restore() can rebuild them from scratch.
 */
static int virtcrypto_freeze(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	vdev->config->reset(vdev);
	virtcrypto_free_unused_reqs(vcrypto);
	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);

	virtcrypto_del_vqs(vcrypto);
	return 0;
}
426
427static int virtcrypto_restore(struct virtio_device *vdev)
428{
429 struct virtio_crypto *vcrypto = vdev->priv;
430 int err;
431
432 err = virtcrypto_init_vqs(vcrypto);
433 if (err)
434 return err;
435
436 virtio_device_ready(vdev);
437 err = virtcrypto_dev_start(vcrypto);
438 if (err) {
439 dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
440 return -EFAULT;
441 }
442
443 return 0;
444}
445#endif
446
/* No optional feature bits are negotiated (VIRTIO_F_VERSION_1 is checked
 * explicitly in virtcrypto_probe()). */
static unsigned int features[] = {
	/* none */
};

/* Match any virtio device carrying the crypto device ID. */
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct virtio_driver virtio_crypto_driver = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.id_table = id_table,
	.probe = virtcrypto_probe,
	.remove = virtcrypto_remove,
	.config_changed = virtcrypto_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtcrypto_freeze,
	.restore = virtcrypto_restore,
#endif
};

module_virtio_driver(virtio_crypto_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("virtio crypto device driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");
diff --git a/drivers/crypto/virtio/virtio_crypto_mgr.c b/drivers/crypto/virtio/virtio_crypto_mgr.c
new file mode 100644
index 000000000000..a69ff71de2c4
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_mgr.c
@@ -0,0 +1,264 @@
1 /* Management for virtio crypto devices (refer to adf_dev_mgr.c)
2 *
3 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/mutex.h>
20#include <linux/list.h>
21#include <linux/module.h>
22
23#include <uapi/linux/virtio_crypto.h>
24#include "virtio_crypto_common.h"
25
/* Global registry of probed virtio crypto devices. */
static LIST_HEAD(virtio_crypto_table);
/* Devices currently registered; also used to mint dev_id in add_dev(). */
static uint32_t num_devices;

/* The table_lock protects the above global list and num_devices */
static DEFINE_MUTEX(table_lock);

/* Hard cap on the number of devices the manager will track. */
#define VIRTIO_CRYPTO_MAX_DEVICES 32
33
34
35/*
36 * virtcrypto_devmgr_add_dev() - Add vcrypto_dev to the acceleration
37 * framework.
38 * @vcrypto_dev: Pointer to virtio crypto device.
39 *
40 * Function adds virtio crypto device to the global list.
41 * To be used by virtio crypto device specific drivers.
42 *
43 * Return: 0 on success, error code othewise.
44 */
45int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev)
46{
47 struct list_head *itr;
48
49 mutex_lock(&table_lock);
50 if (num_devices == VIRTIO_CRYPTO_MAX_DEVICES) {
51 pr_info("virtio_crypto: only support up to %d devices\n",
52 VIRTIO_CRYPTO_MAX_DEVICES);
53 mutex_unlock(&table_lock);
54 return -EFAULT;
55 }
56
57 list_for_each(itr, &virtio_crypto_table) {
58 struct virtio_crypto *ptr =
59 list_entry(itr, struct virtio_crypto, list);
60
61 if (ptr == vcrypto_dev) {
62 mutex_unlock(&table_lock);
63 return -EEXIST;
64 }
65 }
66 atomic_set(&vcrypto_dev->ref_count, 0);
67 list_add_tail(&vcrypto_dev->list, &virtio_crypto_table);
68 vcrypto_dev->dev_id = num_devices++;
69 mutex_unlock(&table_lock);
70 return 0;
71}
72
/*
 * virtcrypto_devmgr_get_head() - Get the head of the device registry.
 *
 * Return: the global virtio crypto device list; per the table_lock
 * comment above, hold table_lock while walking it.
 */
struct list_head *virtcrypto_devmgr_get_head(void)
{
	return &virtio_crypto_table;
}
77
/*
 * virtcrypto_devmgr_rm_dev() - Remove vcrypto_dev from the acceleration
 * framework.
 * @vcrypto_dev: Pointer to virtio crypto device.
 *
 * Function removes virtio crypto device from the acceleration framework.
 * To be used by virtio crypto device specific drivers.
 *
 * Return: void
 */
void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev)
{
	mutex_lock(&table_lock);
	list_del(&vcrypto_dev->list);
	/* Remaining devices keep their dev_id; ids are not renumbered. */
	num_devices--;
	mutex_unlock(&table_lock);
}
95
/*
 * virtcrypto_devmgr_get_first()
 *
 * Function returns the first virtio crypto device from the acceleration
 * framework.
 *
 * To be used by virtio crypto device specific drivers.
 *
 * Return: pointer to vcrypto_dev or NULL if not found.
 */
struct virtio_crypto *virtcrypto_devmgr_get_first(void)
{
	struct virtio_crypto *dev = NULL;

	mutex_lock(&table_lock);
	if (!list_empty(&virtio_crypto_table))
		dev = list_first_entry(&virtio_crypto_table,
				       struct virtio_crypto,
				       list);
	mutex_unlock(&table_lock);
	/* NOTE(review): no reference is taken here, unlike
	 * virtcrypto_get_dev_node() — confirm callers cope with the device
	 * being removed after the lock is dropped. */
	return dev;
}
118
/*
 * virtcrypto_dev_in_use() - Check whether vcrypto_dev is currently in use
 * @vcrypto_dev: Pointer to virtio crypto device.
 *
 * To be used by virtio crypto device specific drivers.
 *
 * Return: 1 when device is in use, 0 otherwise.
 */
int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev)
{
	/* Lockless snapshot of the reference count. */
	return atomic_read(&vcrypto_dev->ref_count) != 0;
}
131
132/*
133 * virtcrypto_dev_get() - Increment vcrypto_dev reference count
134 * @vcrypto_dev: Pointer to virtio crypto device.
135 *
136 * Increment the vcrypto_dev refcount and if this is the first time
137 * incrementing it during this period the vcrypto_dev is in use,
138 * increment the module refcount too.
139 * To be used by virtio crypto device specific drivers.
140 *
141 * Return: 0 when successful, EFAULT when fail to bump module refcount
142 */
143int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev)
144{
145 if (atomic_add_return(1, &vcrypto_dev->ref_count) == 1)
146 if (!try_module_get(vcrypto_dev->owner))
147 return -EFAULT;
148 return 0;
149}
150
151/*
152 * virtcrypto_dev_put() - Decrement vcrypto_dev reference count
153 * @vcrypto_dev: Pointer to virtio crypto device.
154 *
155 * Decrement the vcrypto_dev refcount and if this is the last time
156 * decrementing it during this period the vcrypto_dev is in use,
157 * decrement the module refcount too.
158 * To be used by virtio crypto device specific drivers.
159 *
160 * Return: void
161 */
162void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev)
163{
164 if (atomic_sub_return(1, &vcrypto_dev->ref_count) == 0)
165 module_put(vcrypto_dev->owner);
166}
167
/*
 * virtcrypto_dev_started() - Check whether device has started
 * @vcrypto_dev: Pointer to virtio crypto device.
 *
 * To be used by virtio crypto device specific drivers.
 *
 * Return: nonzero when the device has started, 0 otherwise
 */
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev)
{
	/* status mirrors the device's config-space status field, kept in
	 * sync by the core driver's status-update path. */
	return (vcrypto_dev->status & VIRTIO_CRYPTO_S_HW_READY);
}
180
/*
 * virtcrypto_get_dev_node() - Get vcrypto_dev on the node.
 * @node: Node id the driver works.
 *
 * Function returns the virtio crypto device used fewest on the node,
 * falling back to any started device when none matches the node.
 * A reference is taken on the returned device.
 *
 * To be used by virtio crypto device specific drivers.
 *
 * Return: pointer to vcrypto_dev or NULL if not found.
 */
struct virtio_crypto *virtcrypto_get_dev_node(int node)
{
	struct virtio_crypto *vcrypto_dev = NULL, *tmp_dev;
	unsigned long best = ~0;
	unsigned long ctr;

	mutex_lock(&table_lock);

	/* First pass: least-referenced started device on the requested node
	 * (devices with no node affinity also qualify). */
	list_for_each_entry(tmp_dev, virtcrypto_devmgr_get_head(), list) {

		if ((node == dev_to_node(&tmp_dev->vdev->dev) ||
		     dev_to_node(&tmp_dev->vdev->dev) < 0) &&
		    virtcrypto_dev_started(tmp_dev)) {
			ctr = atomic_read(&tmp_dev->ref_count);
			if (best > ctr) {
				vcrypto_dev = tmp_dev;
				best = ctr;
			}
		}
	}

	if (!vcrypto_dev) {
		pr_info("virtio_crypto: Could not find a device on node %d\n",
			node);
		/* Get any started device */
		list_for_each_entry(tmp_dev,
				virtcrypto_devmgr_get_head(), list) {
			if (virtcrypto_dev_started(tmp_dev)) {
				vcrypto_dev = tmp_dev;
				break;
			}
		}
	}
	mutex_unlock(&table_lock);
	if (!vcrypto_dev)
		return NULL;

	/* NOTE(review): virtcrypto_dev_get() can fail to pin the owning
	 * module; its return value is ignored here — confirm intentional. */
	virtcrypto_dev_get(vcrypto_dev);
	return vcrypto_dev;
}
230
231/*
232 * virtcrypto_dev_start() - Start virtio crypto device
233 * @vcrypto: Pointer to virtio crypto device.
234 *
235 * Function notifies all the registered services that the virtio crypto device
236 * is ready to be used.
237 * To be used by virtio crypto device specific drivers.
238 *
239 * Return: 0 on success, EFAULT when fail to register algorithms
240 */
241int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
242{
243 if (virtio_crypto_algs_register()) {
244 pr_err("virtio_crypto: Failed to register crypto algs\n");
245 return -EFAULT;
246 }
247
248 return 0;
249}
250
/*
 * virtcrypto_dev_stop() - Stop virtio crypto device
 * @vcrypto: Pointer to virtio crypto device.
 *
 * Function notifies all the registered services that the virtio crypto
 * device is no longer available, by unregistering the crypto algorithms.
 * (Original comment was a copy-paste of the start path.)
 * To be used by virtio crypto device specific drivers.
 *
 * Return: void
 */
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
{
	virtio_crypto_algs_unregister();
}
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index bc2ef9fef7c8..a26c5c76ab62 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -462,6 +462,7 @@ header-y += virtio_rng.h
462header-y += virtio_scsi.h 462header-y += virtio_scsi.h
463header-y += virtio_types.h 463header-y += virtio_types.h
464header-y += virtio_vsock.h 464header-y += virtio_vsock.h
465header-y += virtio_crypto.h
465header-y += vm_sockets.h 466header-y += vm_sockets.h
466header-y += vt.h 467header-y += vt.h
467header-y += vtpm_proxy.h 468header-y += vtpm_proxy.h
diff --git a/include/uapi/linux/virtio_crypto.h b/include/uapi/linux/virtio_crypto.h
new file mode 100644
index 000000000000..50cdc8aebfcf
--- /dev/null
+++ b/include/uapi/linux/virtio_crypto.h
@@ -0,0 +1,450 @@
1#ifndef _VIRTIO_CRYPTO_H
2#define _VIRTIO_CRYPTO_H
3/* This header is BSD licensed so anyone can use the definitions to implement
4 * compatible drivers/servers.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of IBM nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
20 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR
21 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
24 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
27 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30#include <linux/types.h>
31#include <linux/virtio_types.h>
32#include <linux/virtio_ids.h>
33#include <linux/virtio_config.h>
34
35
/* Service classes a virtio crypto device can offer. */
#define VIRTIO_CRYPTO_SERVICE_CIPHER 0
#define VIRTIO_CRYPTO_SERVICE_HASH   1
#define VIRTIO_CRYPTO_SERVICE_MAC    2
#define VIRTIO_CRYPTO_SERVICE_AEAD   3

/* Opcodes pack the service class into the high byte, the op into the low. */
#define VIRTIO_CRYPTO_OPCODE(service, op)   (((service) << 8) | (op))

/* Common header of every control-virtqueue request. */
struct virtio_crypto_ctrl_header {
#define VIRTIO_CRYPTO_CIPHER_CREATE_SESSION \
	VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x02)
#define VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION \
	VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x03)
#define VIRTIO_CRYPTO_HASH_CREATE_SESSION \
	VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x02)
#define VIRTIO_CRYPTO_HASH_DESTROY_SESSION \
	VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x03)
#define VIRTIO_CRYPTO_MAC_CREATE_SESSION \
	VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x02)
#define VIRTIO_CRYPTO_MAC_DESTROY_SESSION \
	VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x03)
#define VIRTIO_CRYPTO_AEAD_CREATE_SESSION \
	VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02)
#define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \
	VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03)
	__le32 opcode;
	__le32 algo;
	__le32 flag;
	/* data virtqueue id */
	__le32 queue_id;
};

struct virtio_crypto_cipher_session_para {
#define VIRTIO_CRYPTO_NO_CIPHER                 0
#define VIRTIO_CRYPTO_CIPHER_ARC4               1
#define VIRTIO_CRYPTO_CIPHER_AES_ECB            2
#define VIRTIO_CRYPTO_CIPHER_AES_CBC            3
#define VIRTIO_CRYPTO_CIPHER_AES_CTR            4
#define VIRTIO_CRYPTO_CIPHER_DES_ECB            5
#define VIRTIO_CRYPTO_CIPHER_DES_CBC            6
#define VIRTIO_CRYPTO_CIPHER_3DES_ECB           7
#define VIRTIO_CRYPTO_CIPHER_3DES_CBC           8
#define VIRTIO_CRYPTO_CIPHER_3DES_CTR           9
#define VIRTIO_CRYPTO_CIPHER_KASUMI_F8          10
#define VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2        11
#define VIRTIO_CRYPTO_CIPHER_AES_F8             12
#define VIRTIO_CRYPTO_CIPHER_AES_XTS            13
#define VIRTIO_CRYPTO_CIPHER_ZUC_EEA3           14
	__le32 algo;
	/* length of key */
	__le32 keylen;

#define VIRTIO_CRYPTO_OP_ENCRYPT  1
#define VIRTIO_CRYPTO_OP_DECRYPT  2
	/* encrypt or decrypt */
	__le32 op;
	__le32 padding;
};

/* Device-written response to a create-session request. */
struct virtio_crypto_session_input {
	/* Device-writable part */
	__le64 session_id;
	__le32 status;
	__le32 padding;
};

struct virtio_crypto_cipher_session_req {
	struct virtio_crypto_cipher_session_para para;
	__u8 padding[32];
};
105
struct virtio_crypto_hash_session_para {
#define VIRTIO_CRYPTO_NO_HASH            0
#define VIRTIO_CRYPTO_HASH_MD5           1
#define VIRTIO_CRYPTO_HASH_SHA1          2
#define VIRTIO_CRYPTO_HASH_SHA_224       3
#define VIRTIO_CRYPTO_HASH_SHA_256       4
#define VIRTIO_CRYPTO_HASH_SHA_384       5
#define VIRTIO_CRYPTO_HASH_SHA_512       6
#define VIRTIO_CRYPTO_HASH_SHA3_224      7
#define VIRTIO_CRYPTO_HASH_SHA3_256      8
#define VIRTIO_CRYPTO_HASH_SHA3_384      9
#define VIRTIO_CRYPTO_HASH_SHA3_512      10
#define VIRTIO_CRYPTO_HASH_SHA3_SHAKE128 11
#define VIRTIO_CRYPTO_HASH_SHA3_SHAKE256 12
	__le32 algo;
	/* hash result length */
	__le32 hash_result_len;
	__u8 padding[8];
};

struct virtio_crypto_hash_create_session_req {
	struct virtio_crypto_hash_session_para para;
	__u8 padding[40];
};

struct virtio_crypto_mac_session_para {
#define VIRTIO_CRYPTO_NO_MAC                0
#define VIRTIO_CRYPTO_MAC_HMAC_MD5          1
#define VIRTIO_CRYPTO_MAC_HMAC_SHA1         2
#define VIRTIO_CRYPTO_MAC_HMAC_SHA_224      3
#define VIRTIO_CRYPTO_MAC_HMAC_SHA_256      4
#define VIRTIO_CRYPTO_MAC_HMAC_SHA_384      5
#define VIRTIO_CRYPTO_MAC_HMAC_SHA_512      6
#define VIRTIO_CRYPTO_MAC_CMAC_3DES         25
#define VIRTIO_CRYPTO_MAC_CMAC_AES          26
#define VIRTIO_CRYPTO_MAC_KASUMI_F9         27
#define VIRTIO_CRYPTO_MAC_SNOW3G_UIA2       28
#define VIRTIO_CRYPTO_MAC_GMAC_AES          41
#define VIRTIO_CRYPTO_MAC_GMAC_TWOFISH      42
#define VIRTIO_CRYPTO_MAC_CBCMAC_AES        49
#define VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9  50
#define VIRTIO_CRYPTO_MAC_XCBC_AES          53
	__le32 algo;
	/* hash result length */
	__le32 hash_result_len;
	/* length of authenticated key */
	__le32 auth_key_len;
	__le32 padding;
};

struct virtio_crypto_mac_create_session_req {
	struct virtio_crypto_mac_session_para para;
	__u8 padding[40];
};

struct virtio_crypto_aead_session_para {
#define VIRTIO_CRYPTO_NO_AEAD                 0
#define VIRTIO_CRYPTO_AEAD_GCM                1
#define VIRTIO_CRYPTO_AEAD_CCM                2
#define VIRTIO_CRYPTO_AEAD_CHACHA20_POLY1305  3
	__le32 algo;
	/* length of key */
	__le32 key_len;
	/* hash result length */
	__le32 hash_result_len;
	/* length of the additional authenticated data (AAD) in bytes */
	__le32 aad_len;
	/* encrypt or decrypt, See above VIRTIO_CRYPTO_OP_* */
	__le32 op;
	__le32 padding;
};

struct virtio_crypto_aead_create_session_req {
	struct virtio_crypto_aead_session_para para;
	__u8 padding[32];
};
182
struct virtio_crypto_alg_chain_session_para {
#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER  1
#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH  2
	__le32 alg_chain_order;
/* Plain hash */
#define VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN    1
/* Authenticated hash (mac) */
#define VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH     2
/* Nested hash */
#define VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED   3
	__le32 hash_mode;
	struct virtio_crypto_cipher_session_para cipher_param;
	union {
		struct virtio_crypto_hash_session_para hash_param;
		struct virtio_crypto_mac_session_para mac_param;
		__u8 padding[16];
	} u;
	/* length of the additional authenticated data (AAD) in bytes */
	__le32 aad_len;
	__le32 padding;
};

struct virtio_crypto_alg_chain_session_req {
	struct virtio_crypto_alg_chain_session_para para;
};

struct virtio_crypto_sym_create_session_req {
	union {
		struct virtio_crypto_cipher_session_req cipher;
		struct virtio_crypto_alg_chain_session_req chain;
		__u8 padding[48];
	} u;

	/* Device-readable part */

/* No operation */
#define VIRTIO_CRYPTO_SYM_OP_NONE  0
/* Cipher only operation on the data */
#define VIRTIO_CRYPTO_SYM_OP_CIPHER  1
/*
 * Chain any cipher with any hash or mac operation. The order
 * depends on the value of alg_chain_order param
 */
#define VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING  2
	__le32 op_type;
	__le32 padding;
};

struct virtio_crypto_destroy_session_req {
	/* Device-readable part */
	__le64 session_id;
	__u8 padding[48];
};

/* The request of the control virtqueue's packet */
struct virtio_crypto_op_ctrl_req {
	struct virtio_crypto_ctrl_header header;

	union {
		struct virtio_crypto_sym_create_session_req
			sym_create_session;
		struct virtio_crypto_hash_create_session_req
			hash_create_session;
		struct virtio_crypto_mac_create_session_req
			mac_create_session;
		struct virtio_crypto_aead_create_session_req
			aead_create_session;
		struct virtio_crypto_destroy_session_req
			destroy_session;
		__u8 padding[56];
	} u;
};
255
/* Common header of every data-virtqueue request. */
struct virtio_crypto_op_header {
#define VIRTIO_CRYPTO_CIPHER_ENCRYPT \
	VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x00)
#define VIRTIO_CRYPTO_CIPHER_DECRYPT \
	VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x01)
#define VIRTIO_CRYPTO_HASH \
	VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x00)
#define VIRTIO_CRYPTO_MAC \
	VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x00)
#define VIRTIO_CRYPTO_AEAD_ENCRYPT \
	VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00)
#define VIRTIO_CRYPTO_AEAD_DECRYPT \
	VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01)
	__le32 opcode;
	/* algo should be service-specific algorithms */
	__le32 algo;
	/* session_id should be service-specific algorithms */
	__le64 session_id;
	/* control flag to control the request */
	__le32 flag;
	__le32 padding;
};

struct virtio_crypto_cipher_para {
	/*
	 * Byte Length of valid IV/Counter
	 *
	 * For block ciphers in CBC or F8 mode, or for Kasumi in F8 mode, or for
	 * SNOW3G in UEA2 mode, this is the length of the IV (which
	 * must be the same as the block length of the cipher).
	 * For block ciphers in CTR mode, this is the length of the counter
	 * (which must be the same as the block length of the cipher).
	 * For AES-XTS, this is the 128bit tweak, i, from IEEE Std 1619-2007.
	 *
	 * The IV/Counter will be updated after every partial cryptographic
	 * operation.
	 */
	__le32 iv_len;
	/* length of source data */
	__le32 src_data_len;
	/* length of dst data */
	__le32 dst_data_len;
	__le32 padding;
};

struct virtio_crypto_hash_para {
	/* length of source data */
	__le32 src_data_len;
	/* hash result length */
	__le32 hash_result_len;
};

struct virtio_crypto_mac_para {
	struct virtio_crypto_hash_para hash;
};

struct virtio_crypto_aead_para {
	/*
	 * Byte Length of valid IV data pointed to by the below iv_addr
	 * parameter.
	 *
	 * For GCM mode, this is either 12 (for 96-bit IVs) or 16, in which
	 * case iv_addr points to J0.
	 * For CCM mode, this is the length of the nonce, which can be in the
	 * range 7 to 13 inclusive.
	 */
	__le32 iv_len;
	/* length of additional auth data */
	__le32 aad_len;
	/* length of source data */
	__le32 src_data_len;
	/* length of dst data */
	__le32 dst_data_len;
};
330
struct virtio_crypto_cipher_data_req {
	/* Device-readable part */
	struct virtio_crypto_cipher_para para;
	__u8 padding[24];
};

struct virtio_crypto_hash_data_req {
	/* Device-readable part */
	struct virtio_crypto_hash_para para;
	__u8 padding[40];
};

struct virtio_crypto_mac_data_req {
	/* Device-readable part */
	struct virtio_crypto_mac_para para;
	__u8 padding[40];
};

struct virtio_crypto_alg_chain_data_para {
	__le32 iv_len;
	/* Length of source data */
	__le32 src_data_len;
	/* Length of destination data */
	__le32 dst_data_len;
	/* Starting point for cipher processing in source data */
	__le32 cipher_start_src_offset;
	/* Length of the source data that the cipher will be computed on */
	__le32 len_to_cipher;
	/* Starting point for hash processing in source data */
	__le32 hash_start_src_offset;
	/* Length of the source data that the hash will be computed on */
	__le32 len_to_hash;
	/* Length of the additional auth data */
	__le32 aad_len;
	/* Length of the hash result */
	__le32 hash_result_len;
	__le32 reserved;
};

struct virtio_crypto_alg_chain_data_req {
	/* Device-readable part */
	struct virtio_crypto_alg_chain_data_para para;
};

struct virtio_crypto_sym_data_req {
	union {
		struct virtio_crypto_cipher_data_req cipher;
		struct virtio_crypto_alg_chain_data_req chain;
		__u8 padding[40];
	} u;

	/* See above VIRTIO_CRYPTO_SYM_OP_* */
	__le32 op_type;
	__le32 padding;
};

struct virtio_crypto_aead_data_req {
	/* Device-readable part */
	struct virtio_crypto_aead_para para;
	__u8 padding[32];
};

/* The request of the data virtqueue's packet */
struct virtio_crypto_op_data_req {
	struct virtio_crypto_op_header header;

	union {
		struct virtio_crypto_sym_data_req sym_req;
		struct virtio_crypto_hash_data_req hash_req;
		struct virtio_crypto_mac_data_req mac_req;
		struct virtio_crypto_aead_data_req aead_req;
		__u8 padding[48];
	} u;
};
405
/* Per-request status codes returned in virtio_crypto_inhdr. */
#define VIRTIO_CRYPTO_OK        0
#define VIRTIO_CRYPTO_ERR       1
#define VIRTIO_CRYPTO_BADMSG    2
#define VIRTIO_CRYPTO_NOTSUPP   3
#define VIRTIO_CRYPTO_INVSESS   4 /* Invalid session id */

/* The accelerator hardware is ready */
#define VIRTIO_CRYPTO_S_HW_READY  (1 << 0)

/* Device configuration space layout. */
struct virtio_crypto_config {
	/* See VIRTIO_CRYPTO_S_* above */
	__u32  status;

	/*
	 * Maximum number of data queue
	 */
	__u32  max_dataqueues;

	/*
	 * Specifies the services mask which the device support,
	 * see VIRTIO_CRYPTO_SERVICE_* above
	 */
	__u32 crypto_services;

	/* Detailed algorithms mask */
	__u32 cipher_algo_l;
	__u32 cipher_algo_h;
	__u32 hash_algo;
	__u32 mac_algo_l;
	__u32 mac_algo_h;
	__u32 aead_algo;
	/* Maximum length of cipher key */
	__u32 max_cipher_key_len;
	/* Maximum length of authenticated key */
	__u32 max_auth_key_len;
	__u32 reserve;
	/* Maximum size of each crypto request's content */
	__u64 max_size;
};

/* Device-writable status trailer appended to every data request. */
struct virtio_crypto_inhdr {
	/* See VIRTIO_CRYPTO_* above */
	__u8 status;
};
#endif
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 3228d582234a..6d5c3b2d4f4d 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -42,5 +42,6 @@
42#define VIRTIO_ID_GPU 16 /* virtio GPU */ 42#define VIRTIO_ID_GPU 16 /* virtio GPU */
43#define VIRTIO_ID_INPUT 18 /* virtio input */ 43#define VIRTIO_ID_INPUT 18 /* virtio input */
44#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */ 44#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
45#define VIRTIO_ID_CRYPTO 20 /* virtio crypto */
45 46
46#endif /* _LINUX_VIRTIO_IDS_H */ 47#endif /* _LINUX_VIRTIO_IDS_H */