Diffstat (limited to 'drivers')
 drivers/crypto/Kconfig   |  13 +
 drivers/crypto/Makefile  |   1 +
 drivers/crypto/mv_cesa.c | 606 ++++++++++++++++++++++++++++++++++++++++++++++
 drivers/crypto/mv_cesa.h | 119 ++++++++++
 4 files changed, 739 insertions(+), 0 deletions(-)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 1bb4b7fe4585..b08403d7d1ca 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -157,6 +157,19 @@ config S390_PRNG
 	  ANSI X9.17 standard. The PRNG is usable via the char device
 	  /dev/prandom.
 
+config CRYPTO_DEV_MV_CESA
+	tristate "Marvell's Cryptographic Engine"
+	depends on PLAT_ORION
+	select CRYPTO_ALGAPI
+	select CRYPTO_AES
+	select CRYPTO_BLKCIPHER2
+	help
+	  This driver allows you to utilize the Cryptographic Engines and
+	  Security Accelerator (CESA) which can be found on the Marvell Orion
+	  and Kirkwood SoCs, such as QNAP's TS-209.
+
+	  Currently the driver supports AES in ECB and CBC mode without DMA.
+
 config CRYPTO_DEV_HIFN_795X
 	tristate "Driver HIFN 795x crypto accelerator chips"
 	select CRYPTO_DES
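
The help text above is the whole user-facing contract: the engine registers the usual "ecb(aes)" and "cbc(aes)" algorithms with an asynchronous implementation behind them. As a reading aid, here is a minimal sketch of how an in-kernel caller would reach the engine through the ablkcipher API this patch targets (modern kernels use skcipher instead). The function demo_cbc_once, its callback, and the buffer handling are illustrative, not part of the patch:

#include <crypto/aes.h>
#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static void demo_cb(struct crypto_async_request *req, int err)
{
	/* -EINPROGRESS only signals that a backlogged request started */
	if (err != -EINPROGRESS)
		complete(req->data);
}

static int demo_cbc_once(u8 *buf, unsigned int len, const u8 *key, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	/* "cbc(aes)" resolves to mv-cbc-aes once this driver is loaded */
	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret)
		goto out_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					demo_cb, &done);
	sg_init_one(&sg, buf, len);	/* in-place: src == dst */
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&done);
		ret = 0;
	}
	ablkcipher_request_free(req);
out_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}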
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 9bf4a2bc8846..6ffcb3f7f942 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
+obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
new file mode 100644
index 000000000000..b21ef635f352
--- /dev/null
+++ b/drivers/crypto/mv_cesa.c
@@ -0,0 +1,606 @@
+/*
+ * Support for Marvell's crypto engine which can be found on some Orion5X
+ * boards.
+ *
+ * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
+ * License: GPLv2
+ *
+ */
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#include "mv_cesa.h"
+/*
+ * STM:
+ *   /---------------------------------------\
+ *   |                                       | request complete
+ *  \./                                      |
+ * IDLE -> new request -> BUSY -> done -> DEQUEUE
+ *                        /°\                |
+ *                         |                 | more scatter entries
+ *                         \________________/
+ */
+enum engine_status {
+	ENGINE_IDLE,
+	ENGINE_BUSY,
+	ENGINE_W_DEQUEUE,
+};
+
+/**
+ * struct req_progress - used for every crypt request
+ * @src_sg_it:		sg iterator for src
+ * @dst_sg_it:		sg iterator for dst
+ * @sg_src_left:	bytes left in src to process (scatter list)
+ * @src_start:		offset to add to src start position (scatter list)
+ * @crypt_len:		length of current crypt process
+ * @sg_dst_left:	bytes left in dst to process in this scatter list
+ * @dst_start:		offset to add to dst start position (scatter list)
+ * @total_req_bytes:	total number of bytes processed (request).
+ *
+ * sg helpers are used to iterate over the scatterlist. Since the size of
+ * the SRAM may be less than the scatter size, this struct is used to keep
+ * track of progress within the current scatterlist.
+ */
+struct req_progress {
+	struct sg_mapping_iter src_sg_it;
+	struct sg_mapping_iter dst_sg_it;
+
+	/* src mostly */
+	int sg_src_left;
+	int src_start;
+	int crypt_len;
+	/* dst mostly */
+	int sg_dst_left;
+	int dst_start;
+	int total_req_bytes;
+};
+
+struct crypto_priv {
+	void __iomem *reg;
+	void __iomem *sram;
+	int irq;
+	struct task_struct *queue_th;
+
+	/* the lock protects queue and eng_st */
+	spinlock_t lock;
+	struct crypto_queue queue;
+	enum engine_status eng_st;
+	struct ablkcipher_request *cur_req;
+	struct req_progress p;
+	int max_req_size;
+	int sram_size;
+};
+
+static struct crypto_priv *cpg;
+
+struct mv_ctx {
+	u8 aes_enc_key[AES_KEY_LEN];
+	u32 aes_dec_key[8];
+	int key_len;
+	u32 need_calc_aes_dkey;
+};
+
+enum crypto_op {
+	COP_AES_ECB,
+	COP_AES_CBC,
+};
+
+struct mv_req_ctx {
+	enum crypto_op op;
+	int decrypt;
+};
+
+static void compute_aes_dec_key(struct mv_ctx *ctx)
+{
+	struct crypto_aes_ctx gen_aes_key;
+	int key_pos;
+
+	if (!ctx->need_calc_aes_dkey)
+		return;
+
+	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);
+
+	/* the engine wants the tail of the encryption key schedule */
+	key_pos = ctx->key_len + 24;
+	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
+	switch (ctx->key_len) {
+	case AES_KEYSIZE_256:
+		key_pos -= 2;
+		/* fall through */
+	case AES_KEYSIZE_192:
+		key_pos -= 2;
+		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
+				4 * 4);
+		break;
+	}
+	ctx->need_calc_aes_dkey = 0;
+}
+
+static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
+		unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct mv_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	switch (len) {
+	case AES_KEYSIZE_128:
+	case AES_KEYSIZE_192:
+	case AES_KEYSIZE_256:
+		break;
+	default:
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	ctx->key_len = len;
+	ctx->need_calc_aes_dkey = 1;
+
+	/* copy only the bytes the caller supplied, not AES_KEY_LEN */
+	memcpy(ctx->aes_enc_key, key, len);
+	return 0;
+}
+
+static void setup_data_in(struct ablkcipher_request *req)
+{
+	int ret;
+	void *buf;
+
+	if (!cpg->p.sg_src_left) {
+		ret = sg_miter_next(&cpg->p.src_sg_it);
+		BUG_ON(!ret);
+		cpg->p.sg_src_left = cpg->p.src_sg_it.length;
+		cpg->p.src_start = 0;
+	}
+
+	cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);
+
+	buf = cpg->p.src_sg_it.addr;
+	buf += cpg->p.src_start;
+
+	memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
+
+	cpg->p.sg_src_left -= cpg->p.crypt_len;
+	cpg->p.src_start += cpg->p.crypt_len;
+}
+
+static void mv_process_current_q(int first_block)
+{
+	struct ablkcipher_request *req = cpg->cur_req;
+	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+	struct sec_accel_config op;
+
+	switch (req_ctx->op) {
+	case COP_AES_ECB:
+		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
+		break;
+	case COP_AES_CBC:
+		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
+		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
+			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
+		if (first_block)
+			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
+		break;
+	}
+	if (req_ctx->decrypt) {
+		op.config |= CFG_DIR_DEC;
+		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
+				AES_KEY_LEN);
+	} else {
+		op.config |= CFG_DIR_ENC;
+		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
+				AES_KEY_LEN);
+	}
+
+	switch (ctx->key_len) {
+	case AES_KEYSIZE_128:
+		op.config |= CFG_AES_LEN_128;
+		break;
+	case AES_KEYSIZE_192:
+		op.config |= CFG_AES_LEN_192;
+		break;
+	case AES_KEYSIZE_256:
+		op.config |= CFG_AES_LEN_256;
+		break;
+	}
+	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
+		ENC_P_DST(SRAM_DATA_OUT_START);
+	op.enc_key_p = SRAM_DATA_KEY_P;
+
+	setup_data_in(req);
+	op.enc_len = cpg->p.crypt_len;
+	memcpy(cpg->sram + SRAM_CONFIG, &op,
+			sizeof(struct sec_accel_config));
+
+	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
+	/* GO */
+	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
+
+	/*
+	 * XXX: add timer if the interrupt does not occur for some mystery
+	 * reason
+	 */
+}
+
+static void mv_crypto_algo_completion(void)
+{
+	struct ablkcipher_request *req = cpg->cur_req;
+	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+	if (req_ctx->op != COP_AES_CBC)
+		return;
+
+	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
+}
+
+static void dequeue_complete_req(void)
+{
+	struct ablkcipher_request *req = cpg->cur_req;
+	void *buf;
+	int ret;
+
+	cpg->p.total_req_bytes += cpg->p.crypt_len;
+	do {
+		int dst_copy;
+
+		if (!cpg->p.sg_dst_left) {
+			ret = sg_miter_next(&cpg->p.dst_sg_it);
+			BUG_ON(!ret);
+			cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
+			cpg->p.dst_start = 0;
+		}
+
+		buf = cpg->p.dst_sg_it.addr;
+		buf += cpg->p.dst_start;
+
+		dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
+
+		memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
+
+		cpg->p.sg_dst_left -= dst_copy;
+		cpg->p.crypt_len -= dst_copy;
+		cpg->p.dst_start += dst_copy;
+	} while (cpg->p.crypt_len > 0);
+
+	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
+	if (cpg->p.total_req_bytes < req->nbytes) {
+		/* process next scatter list entry */
+		cpg->eng_st = ENGINE_BUSY;
+		mv_process_current_q(0);
+	} else {
+		sg_miter_stop(&cpg->p.src_sg_it);
+		sg_miter_stop(&cpg->p.dst_sg_it);
+		mv_crypto_algo_completion();
+		cpg->eng_st = ENGINE_IDLE;
+		req->base.complete(&req->base, 0);
+	}
+}
+
+static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
+{
+	int i = 0;
+
+	do {
+		total_bytes -= sl[i].length;
+		i++;
+	} while (total_bytes > 0);
+
+	return i;
+}
+
+static void mv_enqueue_new_req(struct ablkcipher_request *req)
+{
+	int num_sgs;
+
+	cpg->cur_req = req;
+	memset(&cpg->p, 0, sizeof(struct req_progress));
+
+	num_sgs = count_sgs(req->src, req->nbytes);
+	sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+
+	num_sgs = count_sgs(req->dst, req->nbytes);
+	sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
+	mv_process_current_q(1);
+}
+
+static int queue_manag(void *data)
+{
+	cpg->eng_st = ENGINE_IDLE;
+	do {
+		struct ablkcipher_request *req;
+		struct crypto_async_request *async_req = NULL;
+		/* must start out NULL: it is only assigned while the
+		 * engine is idle, but tested unconditionally below */
+		struct crypto_async_request *backlog = NULL;
+
+		__set_current_state(TASK_INTERRUPTIBLE);
+
+		if (cpg->eng_st == ENGINE_W_DEQUEUE)
+			dequeue_complete_req();
+
+		spin_lock_irq(&cpg->lock);
+		if (cpg->eng_st == ENGINE_IDLE) {
+			backlog = crypto_get_backlog(&cpg->queue);
+			async_req = crypto_dequeue_request(&cpg->queue);
+			if (async_req) {
+				BUG_ON(cpg->eng_st != ENGINE_IDLE);
+				cpg->eng_st = ENGINE_BUSY;
+			}
+		}
+		spin_unlock_irq(&cpg->lock);
+
+		if (backlog) {
+			backlog->complete(backlog, -EINPROGRESS);
+			backlog = NULL;
+		}
+
+		if (async_req) {
+			req = container_of(async_req,
+					struct ablkcipher_request, base);
+			mv_enqueue_new_req(req);
+			async_req = NULL;
+		}
+
+		schedule();
+
+	} while (!kthread_should_stop());
+	return 0;
+}
+
+static int mv_handle_req(struct ablkcipher_request *req)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&cpg->lock, flags);
+	ret = ablkcipher_enqueue_request(&cpg->queue, req);
+	spin_unlock_irqrestore(&cpg->lock, flags);
+	wake_up_process(cpg->queue_th);
+	return ret;
+}
+
+static int mv_enc_aes_ecb(struct ablkcipher_request *req)
+{
+	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+	req_ctx->op = COP_AES_ECB;
+	req_ctx->decrypt = 0;
+
+	return mv_handle_req(req);
+}
+
+static int mv_dec_aes_ecb(struct ablkcipher_request *req)
+{
+	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+	req_ctx->op = COP_AES_ECB;
+	req_ctx->decrypt = 1;
+
+	compute_aes_dec_key(ctx);
+	return mv_handle_req(req);
+}
+
+static int mv_enc_aes_cbc(struct ablkcipher_request *req)
+{
+	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+	req_ctx->op = COP_AES_CBC;
+	req_ctx->decrypt = 0;
+
+	return mv_handle_req(req);
+}
+
+static int mv_dec_aes_cbc(struct ablkcipher_request *req)
+{
+	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+	req_ctx->op = COP_AES_CBC;
+	req_ctx->decrypt = 1;
+
+	compute_aes_dec_key(ctx);
+	return mv_handle_req(req);
+}
+
+static int mv_cra_init(struct crypto_tfm *tfm)
+{
+	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
+	return 0;
+}
+
+static irqreturn_t crypto_int(int irq, void *priv)
+{
+	u32 val;
+
+	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
+	if (!(val & SEC_INT_ACCEL0_DONE))
+		return IRQ_NONE;
+
+	val &= ~SEC_INT_ACCEL0_DONE;
+	writel(val, cpg->reg + FPGA_INT_STATUS);
+	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
+	BUG_ON(cpg->eng_st != ENGINE_BUSY);
+	cpg->eng_st = ENGINE_W_DEQUEUE;
+	wake_up_process(cpg->queue_th);
+	return IRQ_HANDLED;
+}
+
+static struct crypto_alg mv_aes_alg_ecb = {
+	.cra_name		= "ecb(aes)",
+	.cra_driver_name	= "mv-ecb-aes",
+	.cra_priority	= 300,
+	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize	= AES_BLOCK_SIZE,
+	.cra_ctxsize	= sizeof(struct mv_ctx),
+	.cra_alignmask	= 0,
+	.cra_type	= &crypto_ablkcipher_type,
+	.cra_module	= THIS_MODULE,
+	.cra_init	= mv_cra_init,
+	.cra_u		= {
+		.ablkcipher = {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= mv_setkey_aes,
+			.encrypt	= mv_enc_aes_ecb,
+			.decrypt	= mv_dec_aes_ecb,
+		},
+	},
+};
+
+static struct crypto_alg mv_aes_alg_cbc = {
+	.cra_name		= "cbc(aes)",
+	.cra_driver_name	= "mv-cbc-aes",
+	.cra_priority	= 300,
+	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize	= AES_BLOCK_SIZE,
+	.cra_ctxsize	= sizeof(struct mv_ctx),
+	.cra_alignmask	= 0,
+	.cra_type	= &crypto_ablkcipher_type,
+	.cra_module	= THIS_MODULE,
+	.cra_init	= mv_cra_init,
+	.cra_u		= {
+		.ablkcipher = {
+			.ivsize		= AES_BLOCK_SIZE,
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= mv_setkey_aes,
+			.encrypt	= mv_enc_aes_cbc,
+			.decrypt	= mv_dec_aes_cbc,
+		},
+	},
+};
+
+static int mv_probe(struct platform_device *pdev)
+{
+	struct crypto_priv *cp;
+	struct resource *res;
+	int irq;
+	int ret;
+
+	if (cpg) {
+		printk(KERN_ERR "Second crypto dev?\n");
+		return -EEXIST;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+	if (!res)
+		return -ENXIO;
+
+	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+	if (!cp)
+		return -ENOMEM;
+
+	spin_lock_init(&cp->lock);
+	crypto_init_queue(&cp->queue, 50);
+	cp->reg = ioremap(res->start, res->end - res->start + 1);
+	if (!cp->reg) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+	if (!res) {
+		ret = -ENXIO;
+		goto err_unmap_reg;
+	}
+	cp->sram_size = res->end - res->start + 1;
+	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
+	cp->sram = ioremap(res->start, cp->sram_size);
+	if (!cp->sram) {
+		ret = -ENOMEM;
+		goto err_unmap_reg;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0 || irq == NO_IRQ) {
+		ret = irq;
+		goto err_unmap_sram;
+	}
+	cp->irq = irq;
+
+	platform_set_drvdata(pdev, cp);
+	cpg = cp;
+
+	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
+	if (IS_ERR(cp->queue_th)) {
+		ret = PTR_ERR(cp->queue_th);
+		goto err_unmap_sram;
+	}
+
+	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
+			cp);
+	if (ret)
+		goto err_thread;
+
+	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
+	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
+
+	ret = crypto_register_alg(&mv_aes_alg_ecb);
+	if (ret)
+		goto err_irq;
+
+	ret = crypto_register_alg(&mv_aes_alg_cbc);
+	if (ret)
+		goto err_unreg_ecb;
+	return 0;
+err_unreg_ecb:
+	crypto_unregister_alg(&mv_aes_alg_ecb);
+err_irq:
+	free_irq(irq, cp);
+err_thread:
+	kthread_stop(cp->queue_th);
+err_unmap_sram:
+	iounmap(cp->sram);
+err_unmap_reg:
+	iounmap(cp->reg);
+err:
+	kfree(cp);
+	cpg = NULL;
+	platform_set_drvdata(pdev, NULL);
+	return ret;
+}
+
+static int mv_remove(struct platform_device *pdev)
+{
+	struct crypto_priv *cp = platform_get_drvdata(pdev);
+
+	crypto_unregister_alg(&mv_aes_alg_ecb);
+	crypto_unregister_alg(&mv_aes_alg_cbc);
+	kthread_stop(cp->queue_th);
+	free_irq(cp->irq, cp);
+	memset(cp->sram, 0, cp->sram_size);
+	iounmap(cp->sram);
+	iounmap(cp->reg);
+	kfree(cp);
+	cpg = NULL;
+	return 0;
+}
+
+static struct platform_driver marvell_crypto = {
+	.probe		= mv_probe,
+	.remove		= mv_remove,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "mv_crypto",
+	},
+};
+MODULE_ALIAS("platform:mv_crypto");
+
+static int __init mv_crypto_init(void)
+{
+	return platform_driver_register(&marvell_crypto);
+}
+module_init(mv_crypto_init);
+
+static void __exit mv_crypto_exit(void)
+{
+	platform_driver_unregister(&marvell_crypto);
+}
+module_exit(mv_crypto_exit);
+
+MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
+MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
+MODULE_LICENSE("GPL");
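
Since mv_probe() above looks up its register window and SRAM by resource name, the driver only binds on boards that register a matching platform device. A hypothetical board-file fragment would look roughly like this; the device name "mv_crypto" and the resource names "regs" and "sram" are what the probe routine expects, while every base address, size, and the IRQ number are made-up placeholders:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct resource mv_crypto_resources[] = {
	{
		.name	= "regs",
		.start	= 0xf1030000,			/* placeholder */
		.end	= 0xf1030000 + 0xffff,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= "sram",
		.start	= 0xf5000000,			/* placeholder */
		.end	= 0xf5000000 + 0x7ff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 28,				/* placeholder IRQ */
		.end	= 28,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device mv_crypto_device = {
	.name		= "mv_crypto",
	.id		= -1,
	.resource	= mv_crypto_resources,
	.num_resources	= ARRAY_SIZE(mv_crypto_resources),
};

/* called from the board's init code */
static int __init board_crypto_init(void)
{
	return platform_device_register(&mv_crypto_device);
}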
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
new file mode 100644
index 000000000000..c3e25d3bb171
--- /dev/null
+++ b/drivers/crypto/mv_cesa.h
@@ -0,0 +1,119 @@
+#ifndef __MV_CRYPTO_H__
+#define __MV_CRYPTO_H__
+
+#define DIGEST_INITIAL_VAL_A	0xdd00
+#define DES_CMD_REG		0xdd58
+
+#define SEC_ACCEL_CMD		0xde00
+#define SEC_CMD_EN_SEC_ACCL0	(1 << 0)
+#define SEC_CMD_EN_SEC_ACCL1	(1 << 1)
+#define SEC_CMD_DISABLE_SEC	(1 << 2)
+
+#define SEC_ACCEL_DESC_P0	0xde04
+#define SEC_DESC_P0_PTR(x)	(x)
+
+#define SEC_ACCEL_DESC_P1	0xde14
+#define SEC_DESC_P1_PTR(x)	(x)
+
+#define SEC_ACCEL_CFG		0xde08
+#define SEC_CFG_STOP_DIG_ERR	(1 << 0)
+#define SEC_CFG_CH0_W_IDMA	(1 << 7)
+#define SEC_CFG_CH1_W_IDMA	(1 << 8)
+#define SEC_CFG_ACT_CH0_IDMA	(1 << 9)
+#define SEC_CFG_ACT_CH1_IDMA	(1 << 10)
+
+#define SEC_ACCEL_STATUS	0xde0c
+#define SEC_ST_ACT_0		(1 << 0)
+#define SEC_ST_ACT_1		(1 << 1)
+
+/*
+ * FPGA_INT_STATUS looks like an FPGA leftover and is documented only in
+ * Errata 4.12. It seems it was part of an IRQ controller in the FPGA and
+ * someone forgot to remove it while switching to the core and moving to
+ * SEC_ACCEL_INT_STATUS.
+ */
+#define FPGA_INT_STATUS		0xdd68
+#define SEC_ACCEL_INT_STATUS	0xde20
+#define SEC_INT_AUTH_DONE	(1 << 0)
+#define SEC_INT_DES_E_DONE	(1 << 1)
+#define SEC_INT_AES_E_DONE	(1 << 2)
+#define SEC_INT_AES_D_DONE	(1 << 3)
+#define SEC_INT_ENC_DONE	(1 << 4)
+#define SEC_INT_ACCEL0_DONE	(1 << 5)
+#define SEC_INT_ACCEL1_DONE	(1 << 6)
+#define SEC_INT_ACC0_IDMA_DONE	(1 << 7)
+#define SEC_INT_ACC1_IDMA_DONE	(1 << 8)
+
+#define SEC_ACCEL_INT_MASK	0xde24
+
+#define AES_KEY_LEN		(8 * 4)
+
+struct sec_accel_config {
+	u32 config;
+#define CFG_OP_MAC_ONLY		0
+#define CFG_OP_CRYPT_ONLY	1
+#define CFG_OP_MAC_CRYPT	2
+#define CFG_OP_CRYPT_MAC	3
+#define CFG_MACM_MD5		(4 << 4)
+#define CFG_MACM_SHA1		(5 << 4)
+#define CFG_MACM_HMAC_MD5	(6 << 4)
+#define CFG_MACM_HMAC_SHA1	(7 << 4)
+#define CFG_ENCM_DES		(1 << 8)
+#define CFG_ENCM_3DES		(2 << 8)
+#define CFG_ENCM_AES		(3 << 8)
+#define CFG_DIR_ENC		(0 << 12)
+#define CFG_DIR_DEC		(1 << 12)
+#define CFG_ENC_MODE_ECB	(0 << 16)
+#define CFG_ENC_MODE_CBC	(1 << 16)
+#define CFG_3DES_EEE		(0 << 20)
+#define CFG_3DES_EDE		(1 << 20)
+#define CFG_AES_LEN_128		(0 << 24)
+#define CFG_AES_LEN_192		(1 << 24)
+#define CFG_AES_LEN_256		(2 << 24)
+
+	u32 enc_p;
+#define ENC_P_SRC(x)		(x)
+#define ENC_P_DST(x)		((x) << 16)
+
+	u32 enc_len;
+#define ENC_LEN(x)		(x)
+
+	u32 enc_key_p;
+#define ENC_KEY_P(x)		(x)
+
+	u32 enc_iv;
+#define ENC_IV_POINT(x)		((x) << 0)
+#define ENC_IV_BUF_POINT(x)	((x) << 16)
+
+	u32 mac_src_p;
+#define MAC_SRC_DATA_P(x)	(x)
+#define MAC_SRC_TOTAL_LEN(x)	((x) << 16)
+
+	u32 mac_digest;
+	u32 mac_iv;
+} __attribute__ ((packed));
+/*
+ * /-----------\ 0
+ * | ACCEL CFG |	4 * 8
+ * |-----------| 0x20
+ * | CRYPT KEY |	8 * 4
+ * |-----------| 0x40
+ * |  IV   IN  |	4 * 4
+ * |-----------| 0x40 (inplace)
+ * |  IV BUF   |	4 * 4
+ * |-----------| 0x50
+ * |  DATA IN  |	16 * x (max ->max_req_size)
+ * |-----------| 0x50 (inplace operation)
+ * |  DATA OUT |	16 * x (max ->max_req_size)
+ * \-----------/ SRAM size
+ */
+#define SRAM_CONFIG		0x00
+#define SRAM_DATA_KEY_P		0x20
+#define SRAM_DATA_IV		0x40
+#define SRAM_DATA_IV_BUF	0x40
+#define SRAM_DATA_IN_START	0x50
+#define SRAM_DATA_OUT_START	0x50
+
+#define SRAM_CFG_SPACE		0x50
+
+#endif /* __MV_CRYPTO_H__ */
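
To tie the descriptor layout and the SRAM map together: filling a descriptor for one AES-128 CBC encryption pass reduces to OR-ing the config bits and packing the SRAM offsets into the pointer words, which mirrors what mv_process_current_q() assembles. A worked example, not part of the driver, with the chunk length len supplied by the caller (at most max_req_size):

#include <linux/types.h>

#include "mv_cesa.h"

/*
 * Worked example only: fill a descriptor for one AES-128-CBC encryption
 * pass over `len` bytes already staged at SRAM_DATA_IN_START.
 */
static void demo_fill_cbc_enc_desc(struct sec_accel_config *op, u32 len)
{
	op->config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC |
			CFG_DIR_ENC | CFG_AES_LEN_128;
	op->enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
			ENC_P_DST(SRAM_DATA_OUT_START);
	op->enc_len = ENC_LEN(len);
	op->enc_key_p = ENC_KEY_P(SRAM_DATA_KEY_P);
	op->enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
	/*
	 * The driver then copies *op to SRAM_CONFIG, points
	 * SEC_ACCEL_DESC_P0 at it, and writes SEC_CMD_EN_SEC_ACCL0 to
	 * SEC_ACCEL_CMD to start the engine.
	 */
}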