Diffstat (limited to 'drivers/crypto/omap-aes.c')
 drivers/crypto/omap-aes.c | 468
 1 file changed, 261 insertions(+), 207 deletions(-)
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 5f7980586850..ce791c2f81f7 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -13,7 +13,9 @@
  *
  */

-#define pr_fmt(fmt) "%s: " fmt, __func__
+#define pr_fmt(fmt) "%20s: " fmt, __func__
+#define prn(num) pr_debug(#num "=%d\n", num)
+#define prx(num) pr_debug(#num "=%x\n", num)

 #include <linux/err.h>
 #include <linux/module.h>
@@ -38,6 +40,8 @@
 #define DST_MAXBURST			4
 #define DMA_MIN				(DST_MAXBURST * sizeof(u32))

+#define _calc_walked(inout)	(dd->inout##_walk.offset - dd->inout##_sg->offset)
+
 /* OMAP TRM gives bitfields as start:end, where start is the higher bit
    number. For example 7:0 */
 #define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
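Note: _calc_walked() relies on ## token pasting, so one macro serves both the input and output walks. What a call expands to (assuming a struct omap_aes_dev *dd in scope, as in the IRQ handler added later in this patch):

	/* _calc_walked(in)  => (dd->in_walk.offset  - dd->in_sg->offset)  */
	/* _calc_walked(out) => (dd->out_walk.offset - dd->out_sg->offset) */
	/* i.e. how many bytes of the current scatterlist entry the walk   */
	/* has already consumed                                             */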
@@ -74,6 +78,10 @@
 
 #define AES_REG_LENGTH_N(x)	(0x54 + ((x) * 0x04))

+#define AES_REG_IRQ_STATUS(dd)	((dd)->pdata->irq_status_ofs)
+#define AES_REG_IRQ_ENABLE(dd)	((dd)->pdata->irq_enable_ofs)
+#define AES_REG_IRQ_DATA_IN	BIT(1)
+#define AES_REG_IRQ_DATA_OUT	BIT(2)
 #define DEFAULT_TIMEOUT		(5*HZ)

 #define FLAGS_MODE_MASK		0x000f
@@ -86,6 +94,8 @@
 #define FLAGS_FAST		BIT(5)
 #define FLAGS_BUSY		BIT(6)

+#define AES_BLOCK_WORDS		(AES_BLOCK_SIZE >> 2)
+
 struct omap_aes_ctx {
 	struct omap_aes_dev *dd;

@@ -119,6 +129,8 @@ struct omap_aes_pdata {
 	u32		data_ofs;
 	u32		rev_ofs;
 	u32		mask_ofs;
+	u32		irq_enable_ofs;
+	u32		irq_status_ofs;

 	u32		dma_enable_in;
 	u32		dma_enable_out;
@@ -146,25 +158,32 @@ struct omap_aes_dev {
 	struct tasklet_struct	queue_task;

 	struct ablkcipher_request	*req;
+
+	/*
+	 * total is used by PIO mode for book keeping, so introduce
+	 * total_save, which is needed to calculate the page order
+	 */
 	size_t				total;
+	size_t				total_save;
+
 	struct scatterlist		*in_sg;
-	struct scatterlist		in_sgl;
-	size_t				in_offset;
 	struct scatterlist		*out_sg;
+
+	/* Buffers for copying for unaligned cases */
+	struct scatterlist		in_sgl;
 	struct scatterlist		out_sgl;
-	size_t				out_offset;
+	struct scatterlist		*orig_out;
+	int				sgs_copied;

-	size_t			buflen;
-	void			*buf_in;
-	size_t			dma_size;
+	struct scatter_walk		in_walk;
+	struct scatter_walk		out_walk;
 	int			dma_in;
 	struct dma_chan		*dma_lch_in;
-	dma_addr_t		dma_addr_in;
-	void			*buf_out;
 	int			dma_out;
 	struct dma_chan		*dma_lch_out;
-	dma_addr_t		dma_addr_out;
-
+	int			in_sg_len;
+	int			out_sg_len;
+	int			pio_only;
 	const struct omap_aes_pdata	*pdata;
 };

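Note: dd->total is consumed as the transfer progresses (the PIO handler below decrements it block by block), so total_save preserves the original request length for completion-time work. A sketch of the completion-side use, mirroring omap_aes_done_task() further down:

	/* sketch: only the saved length can size the copy-back and free */
	sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);
	free_pages((unsigned long)buf_in, get_order(dd->total_save));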
@@ -172,16 +191,36 @@ struct omap_aes_dev {
 static LIST_HEAD(dev_list);
 static DEFINE_SPINLOCK(list_lock);

+#ifdef DEBUG
+#define omap_aes_read(dd, offset)				\
+({								\
+	int _read_ret;						\
+	_read_ret = __raw_readl(dd->io_base + offset);		\
+	pr_debug("omap_aes_read(" #offset "=%#x)= %#x\n",	\
+		 offset, _read_ret);				\
+	_read_ret;						\
+})
+#else
 static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
 {
 	return __raw_readl(dd->io_base + offset);
 }
+#endif

+#ifdef DEBUG
+#define omap_aes_write(dd, offset, value)				\
+	do {								\
+		pr_debug("omap_aes_write(" #offset "=%#x) value=%#x\n",	\
+			 offset, value);				\
+		__raw_writel(value, dd->io_base + offset);		\
+	} while (0)
+#else
 static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
 				  u32 value)
 {
 	__raw_writel(value, dd->io_base + offset);
 }
+#endif

 static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
 					u32 value, u32 mask)
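Note: under DEBUG the accessors become macros so that pr_debug() can stringize the offset argument; the read variant uses a GCC statement expression, ({ ... }), whose value is its last expression. The same pattern in a minimal, self-contained sketch (hypothetical names):

	#define trace_readl(base, offset)				\
	({								\
		u32 _v = __raw_readl((base) + (offset));		\
		pr_debug("read(" #offset ") = %#x\n", _v);		\
		_v;	/* the statement expression evaluates to _v */	\
	})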
@@ -323,33 +362,6 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd)
 	dd->dma_lch_out = NULL;
 	dd->dma_lch_in = NULL;

-	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
-	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
-	dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE;
-	dd->buflen &= ~(AES_BLOCK_SIZE - 1);
-
-	if (!dd->buf_in || !dd->buf_out) {
-		dev_err(dd->dev, "unable to alloc pages.\n");
-		goto err_alloc;
-	}
-
-	/* MAP here */
-	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen,
-					 DMA_TO_DEVICE);
-	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
-		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
-		err = -EINVAL;
-		goto err_map_in;
-	}
-
-	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen,
-					  DMA_FROM_DEVICE);
-	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
-		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
-		err = -EINVAL;
-		goto err_map_out;
-	}
-
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);

@@ -376,14 +388,6 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd)
 err_dma_out:
 	dma_release_channel(dd->dma_lch_in);
 err_dma_in:
-	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
-			 DMA_FROM_DEVICE);
-err_map_out:
-	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
-err_map_in:
-	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
-	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
-err_alloc:
 	if (err)
 		pr_err("error: %d\n", err);
 	return err;
@@ -393,11 +397,6 @@ static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
 {
 	dma_release_channel(dd->dma_lch_out);
 	dma_release_channel(dd->dma_lch_in);
-	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
-			 DMA_FROM_DEVICE);
-	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
-	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
-	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
 }

 static void sg_copy_buf(void *buf, struct scatterlist *sg,
@@ -414,59 +413,27 @@ static void sg_copy_buf(void *buf, struct scatterlist *sg,
 	scatterwalk_done(&walk, out, 0);
 }

-static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
-		   size_t buflen, size_t total, int out)
-{
-	unsigned int count, off = 0;
-
-	while (buflen && total) {
-		count = min((*sg)->length - *offset, total);
-		count = min(count, buflen);
-
-		if (!count)
-			return off;
-
-		/*
-		 * buflen and total are AES_BLOCK_SIZE size aligned,
-		 * so count should be also aligned
-		 */
-
-		sg_copy_buf(buf + off, *sg, *offset, count, out);
-
-		off += count;
-		buflen -= count;
-		*offset += count;
-		total -= count;
-
-		if (*offset == (*sg)->length) {
-			*sg = sg_next(*sg);
-			if (*sg)
-				*offset = 0;
-			else
-				total = 0;
-		}
-	}
-
-	return off;
-}
-
 static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
-		struct scatterlist *in_sg, struct scatterlist *out_sg)
+		struct scatterlist *in_sg, struct scatterlist *out_sg,
+		int in_sg_len, int out_sg_len)
 {
 	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct omap_aes_dev *dd = ctx->dd;
 	struct dma_async_tx_descriptor *tx_in, *tx_out;
 	struct dma_slave_config cfg;
-	dma_addr_t dma_addr_in = sg_dma_address(in_sg);
-	int ret, length = sg_dma_len(in_sg);
+	int ret;

-	pr_debug("len: %d\n", length);
+	if (dd->pio_only) {
+		scatterwalk_start(&dd->in_walk, dd->in_sg);
+		scatterwalk_start(&dd->out_walk, dd->out_sg);

-	dd->dma_size = length;
+		/* Enable DATAIN interrupt and let it take
+		   care of the rest */
+		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
+		return 0;
+	}

-	if (!(dd->flags & FLAGS_FAST))
-		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
-					   DMA_TO_DEVICE);
+	dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);

 	memset(&cfg, 0, sizeof(cfg));

@@ -485,7 +452,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
 		return ret;
 	}

-	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, 1,
+	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
 					DMA_MEM_TO_DEV,
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!tx_in) {
@@ -504,7 +471,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
 		return ret;
 	}

-	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, 1,
+	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
 					DMA_DEV_TO_MEM,
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!tx_out) {
@@ -522,7 +489,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
 	dma_async_issue_pending(dd->dma_lch_out);

 	/* start DMA */
-	dd->pdata->trigger(dd, length);
+	dd->pdata->trigger(dd, dd->total);

 	return 0;
 }
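Note: the literal 0x2 written to AES_REG_IRQ_ENABLE(dd) in the PIO branch above equals AES_REG_IRQ_DATA_IN (BIT(1)), and the 0x4 used later for the output side equals AES_REG_IRQ_DATA_OUT (BIT(2)); the named constants would express the same thing more clearly:

	/* equivalent to the raw 0x2 / 0x4 values used in this patch */
	omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), AES_REG_IRQ_DATA_IN);
	omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), AES_REG_IRQ_DATA_OUT);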
@@ -531,93 +498,32 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
 {
 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
 					crypto_ablkcipher_reqtfm(dd->req));
-	int err, fast = 0, in, out;
-	size_t count;
-	dma_addr_t addr_in, addr_out;
-	struct scatterlist *in_sg, *out_sg;
-	int len32;
+	int err;

 	pr_debug("total: %d\n", dd->total);

-	if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
-		/* check for alignment */
-		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
-		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));
-
-		fast = in && out;
-	}
-
-	if (fast) {
-		count = min(dd->total, sg_dma_len(dd->in_sg));
-		count = min(count, sg_dma_len(dd->out_sg));
-
-		if (count != dd->total) {
-			pr_err("request length != buffer length\n");
-			return -EINVAL;
-		}
-
-		pr_debug("fast\n");
-
-		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+	if (!dd->pio_only) {
+		err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
+				 DMA_TO_DEVICE);
 		if (!err) {
 			dev_err(dd->dev, "dma_map_sg() error\n");
 			return -EINVAL;
 		}

-		err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+		err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+				 DMA_FROM_DEVICE);
 		if (!err) {
 			dev_err(dd->dev, "dma_map_sg() error\n");
-			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
 			return -EINVAL;
 		}
-
-		addr_in = sg_dma_address(dd->in_sg);
-		addr_out = sg_dma_address(dd->out_sg);
-
-		in_sg = dd->in_sg;
-		out_sg = dd->out_sg;
-
-		dd->flags |= FLAGS_FAST;
-
-	} else {
-		/* use cache buffers */
-		count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
-				 dd->buflen, dd->total, 0);
-
-		len32 = DIV_ROUND_UP(count, DMA_MIN) * DMA_MIN;
-
-		/*
-		 * The data going into the AES module has been copied
-		 * to a local buffer and the data coming out will go
-		 * into a local buffer so set up local SG entries for
-		 * both.
-		 */
-		sg_init_table(&dd->in_sgl, 1);
-		dd->in_sgl.offset = dd->in_offset;
-		sg_dma_len(&dd->in_sgl) = len32;
-		sg_dma_address(&dd->in_sgl) = dd->dma_addr_in;
-
-		sg_init_table(&dd->out_sgl, 1);
-		dd->out_sgl.offset = dd->out_offset;
-		sg_dma_len(&dd->out_sgl) = len32;
-		sg_dma_address(&dd->out_sgl) = dd->dma_addr_out;
-
-		in_sg = &dd->in_sgl;
-		out_sg = &dd->out_sgl;
-
-		addr_in = dd->dma_addr_in;
-		addr_out = dd->dma_addr_out;
-
-		dd->flags &= ~FLAGS_FAST;
-
 	}

-	dd->total -= count;
-
-	err = omap_aes_crypt_dma(tfm, in_sg, out_sg);
-	if (err) {
-		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
-		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
+	err = omap_aes_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len,
+				 dd->out_sg_len);
+	if (err && !dd->pio_only) {
+		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
+		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+			     DMA_FROM_DEVICE);
 	}

 	return err;
@@ -637,7 +543,6 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
 static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
 {
 	int err = 0;
-	size_t count;

 	pr_debug("total: %d\n", dd->total);

@@ -646,23 +551,49 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
 	dmaengine_terminate_all(dd->dma_lch_in);
 	dmaengine_terminate_all(dd->dma_lch_out);

-	if (dd->flags & FLAGS_FAST) {
-		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
-		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
-	} else {
-		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
-					   dd->dma_size, DMA_FROM_DEVICE);
-
-		/* copy data */
-		count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out,
-				 dd->buflen, dd->dma_size, 1);
-		if (count != dd->dma_size) {
-			err = -EINVAL;
-			pr_err("not all data converted: %u\n", count);
-		}
+	return err;
+}
+
+int omap_aes_check_aligned(struct scatterlist *sg)
+{
+	while (sg) {
+		if (!IS_ALIGNED(sg->offset, 4))
+			return -1;
+		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
+			return -1;
+		sg = sg_next(sg);
 	}
+	return 0;
+}

-	return err;
+int omap_aes_copy_sgs(struct omap_aes_dev *dd)
+{
+	void *buf_in, *buf_out;
+	int pages;
+
+	pages = get_order(dd->total);
+
+	buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
+	buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
+
+	if (!buf_in || !buf_out) {
+		pr_err("Couldn't allocate pages for unaligned cases.\n");
+		return -1;
+	}
+
+	dd->orig_out = dd->out_sg;
+
+	sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);
+
+	sg_init_table(&dd->in_sgl, 1);
+	sg_set_buf(&dd->in_sgl, buf_in, dd->total);
+	dd->in_sg = &dd->in_sgl;
+
+	sg_init_table(&dd->out_sgl, 1);
+	sg_set_buf(&dd->out_sgl, buf_out, dd->total);
+	dd->out_sg = &dd->out_sgl;
+
+	return 0;
 }

 static int omap_aes_handle_queue(struct omap_aes_dev *dd,
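Note: together these two helpers implement the unaligned-request fallback: any segment that does not start on a 32-bit boundary or cover a whole number of AES blocks forces the request to be linearized into freshly allocated pages. For example (hypothetical buffers):

	struct scatterlist sg;

	sg_init_one(&sg, buf, 96);	/* 6 * AES_BLOCK_SIZE: accepted      */
	sg_init_one(&sg, buf, 100);	/* 100 % AES_BLOCK_SIZE != 0: copied */
	sg_init_one(&sg, buf + 2, 96);	/* offset not 4-byte aligned: copied */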
668static int omap_aes_handle_queue(struct omap_aes_dev *dd, 599static int omap_aes_handle_queue(struct omap_aes_dev *dd,
@@ -698,11 +629,23 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
698 /* assign new request to device */ 629 /* assign new request to device */
699 dd->req = req; 630 dd->req = req;
700 dd->total = req->nbytes; 631 dd->total = req->nbytes;
701 dd->in_offset = 0; 632 dd->total_save = req->nbytes;
702 dd->in_sg = req->src; 633 dd->in_sg = req->src;
703 dd->out_offset = 0;
704 dd->out_sg = req->dst; 634 dd->out_sg = req->dst;
705 635
636 if (omap_aes_check_aligned(dd->in_sg) ||
637 omap_aes_check_aligned(dd->out_sg)) {
638 if (omap_aes_copy_sgs(dd))
639 pr_err("Failed to copy SGs for unaligned cases\n");
640 dd->sgs_copied = 1;
641 } else {
642 dd->sgs_copied = 0;
643 }
644
645 dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total);
646 dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total);
647 BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);
648
706 rctx = ablkcipher_request_ctx(req); 649 rctx = ablkcipher_request_ctx(req);
707 ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); 650 ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
708 rctx->mode &= FLAGS_MODE_MASK; 651 rctx->mode &= FLAGS_MODE_MASK;
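Note: scatterwalk_bytes_sglen() returns the number of scatterlist entries needed to cover the given byte count, or a negative value if the list is too short, which is what the BUG_ON() above guards against. Open-coded, the logic is roughly (illustration only, not the actual scatterwalk implementation):

	static int sg_entries_for(struct scatterlist *sg, int bytes)
	{
		int n = 0;

		while (sg && bytes > 0) {
			bytes -= sg->length;
			sg = sg_next(sg);
			n++;
		}
		return bytes > 0 ? -1 : n;	/* negative: list too short */
	}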
@@ -726,21 +669,32 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
 static void omap_aes_done_task(unsigned long data)
 {
 	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
-	int err;
-
-	pr_debug("enter\n");
+	void *buf_in, *buf_out;
+	int pages;
+
+	pr_debug("enter done_task\n");
+
+	if (!dd->pio_only) {
+		dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
+				       DMA_FROM_DEVICE);
+		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
+		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+			     DMA_FROM_DEVICE);
+		omap_aes_crypt_dma_stop(dd);
+	}

-	err = omap_aes_crypt_dma_stop(dd);
+	if (dd->sgs_copied) {
+		buf_in = sg_virt(&dd->in_sgl);
+		buf_out = sg_virt(&dd->out_sgl);

-	err = dd->err ? : err;
+		sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);

-	if (dd->total && !err) {
-		err = omap_aes_crypt_dma_start(dd);
-		if (!err)
-			return; /* DMA started. Not fininishing. */
+		pages = get_order(dd->total_save);
+		free_pages((unsigned long)buf_in, pages);
+		free_pages((unsigned long)buf_out, pages);
 	}

-	omap_aes_finish_req(dd, err);
+	omap_aes_finish_req(dd, 0);
 	omap_aes_handle_queue(dd, NULL);

 	pr_debug("exit\n");
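Note: sg_copy_buf()'s final argument selects direction (0 copies from the scatterlist into the buffer, non-zero copies from the buffer out to the scatterlist), so the two bounce-buffer calls in this patch read:

	sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);		/* sg -> buf */
	sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);	/* buf -> sg */

Also visible above: omap_aes_finish_req(dd, 0) now completes the request unconditionally with status 0, since the whole request is submitted in one shot and the old per-chunk restart logic is gone.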
@@ -1002,6 +956,8 @@ static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
 	.data_ofs	= 0x60,
 	.rev_ofs	= 0x80,
 	.mask_ofs	= 0x84,
+	.irq_status_ofs = 0x8c,
+	.irq_enable_ofs = 0x90,
 	.dma_enable_in	= BIT(5),
 	.dma_enable_out	= BIT(6),
 	.major_mask	= 0x0700,
@@ -1010,6 +966,90 @@ static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
 	.minor_shift	= 0,
 };

+static irqreturn_t omap_aes_irq(int irq, void *dev_id)
+{
+	struct omap_aes_dev *dd = dev_id;
+	u32 status, i;
+	u32 *src, *dst;
+
+	status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd));
+	if (status & AES_REG_IRQ_DATA_IN) {
+		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);
+
+		BUG_ON(!dd->in_sg);
+
+		BUG_ON(_calc_walked(in) > dd->in_sg->length);
+
+		src = sg_virt(dd->in_sg) + _calc_walked(in);
+
+		for (i = 0; i < AES_BLOCK_WORDS; i++) {
+			omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);
+
+			scatterwalk_advance(&dd->in_walk, 4);
+			if (dd->in_sg->length == _calc_walked(in)) {
+				dd->in_sg = scatterwalk_sg_next(dd->in_sg);
+				if (dd->in_sg) {
+					scatterwalk_start(&dd->in_walk,
+							  dd->in_sg);
+					src = sg_virt(dd->in_sg) +
+					      _calc_walked(in);
+				}
+			} else {
+				src++;
+			}
+		}
+
+		/* Clear IRQ status */
+		status &= ~AES_REG_IRQ_DATA_IN;
+		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
+
+		/* Enable DATA_OUT interrupt */
+		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);
+
+	} else if (status & AES_REG_IRQ_DATA_OUT) {
+		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);
+
+		BUG_ON(!dd->out_sg);
+
+		BUG_ON(_calc_walked(out) > dd->out_sg->length);
+
+		dst = sg_virt(dd->out_sg) + _calc_walked(out);
+
+		for (i = 0; i < AES_BLOCK_WORDS; i++) {
+			*dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
+			scatterwalk_advance(&dd->out_walk, 4);
+			if (dd->out_sg->length == _calc_walked(out)) {
+				dd->out_sg = scatterwalk_sg_next(dd->out_sg);
+				if (dd->out_sg) {
+					scatterwalk_start(&dd->out_walk,
+							  dd->out_sg);
+					dst = sg_virt(dd->out_sg) +
+					      _calc_walked(out);
+				}
+			} else {
+				dst++;
+			}
+		}
+
+		dd->total -= AES_BLOCK_SIZE;
+
+		BUG_ON(dd->total < 0);
+
+		/* Clear IRQ status */
+		status &= ~AES_REG_IRQ_DATA_OUT;
+		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
+
+		if (!dd->total)
+			/* All bytes read! */
+			tasklet_schedule(&dd->done_task);
+		else
+			/* Enable DATA_IN interrupt for next block */
+			omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
+	}
+
+	return IRQ_HANDLED;
+}
+
 static const struct of_device_id omap_aes_of_match[] = {
 	{
 		.compatible	= "ti,omap2-aes",
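Note: the handler ping-pongs between the two interrupt sources: on DATA_IN it feeds one 16-byte block (AES_BLOCK_WORDS, i.e. four 32-bit words) into the data registers and arms DATA_OUT; on DATA_OUT it drains the result block and either re-arms DATA_IN or schedules done_task. The word-by-word scatterlist advance used in both loops can be pictured as a small helper (hypothetical refactoring, not part of the patch):

	static u32 *walk_next_word(struct scatterlist **sg,
				   struct scatter_walk *walk)
	{
		scatterwalk_advance(walk, 4);
		if ((*sg)->length == walk->offset - (*sg)->offset) {
			*sg = scatterwalk_sg_next(*sg);
			if (!*sg)
				return NULL;	/* walk exhausted */
			scatterwalk_start(walk, *sg);
		}
		return sg_virt(*sg) + (walk->offset - (*sg)->offset);
	}

One caveat for reviewers: dd->total is a size_t, so the BUG_ON(dd->total < 0) above can never fire; an explicit underflow check before the subtraction would be needed to catch a short final block.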
@@ -1115,10 +1155,10 @@ static int omap_aes_probe(struct platform_device *pdev)
 	struct omap_aes_dev *dd;
 	struct crypto_alg *algp;
 	struct resource res;
-	int err = -ENOMEM, i, j;
+	int err = -ENOMEM, i, j, irq = -1;
 	u32 reg;

-	dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL);
+	dd = devm_kzalloc(dev, sizeof(struct omap_aes_dev), GFP_KERNEL);
 	if (dd == NULL) {
 		dev_err(dev, "unable to alloc data struct.\n");
 		goto err_data;
@@ -1158,8 +1198,23 @@ static int omap_aes_probe(struct platform_device *pdev)
 	tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);

 	err = omap_aes_dma_init(dd);
-	if (err)
-		goto err_dma;
+	if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
+		dd->pio_only = 1;
+
+		irq = platform_get_irq(pdev, 0);
+		if (irq < 0) {
+			dev_err(dev, "can't get IRQ resource\n");
+			goto err_irq;
+		}
+
+		err = devm_request_irq(dev, irq, omap_aes_irq, 0,
+				       dev_name(dev), dd);
+		if (err) {
+			dev_err(dev, "Unable to grab omap-aes IRQ\n");
+			goto err_irq;
+		}
+	}
+

 	INIT_LIST_HEAD(&dd->list);
 	spin_lock(&list_lock);
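Note: the PIO fallback engages only when the variant's pdata provides IRQ register offsets; with this patch only omap_aes_pdata_omap4 above defines them. The guard macros expand to simple offset checks:

	/* err && dd->pdata->irq_status_ofs && dd->pdata->irq_enable_ofs */

As a side effect visible in this hunk, when DMA setup fails on a variant without IRQ offsets, err is no longer handled here at all (the old goto err_dma is removed), so such a failure would silently fall through.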
@@ -1187,13 +1242,13 @@ err_algs:
 		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
 			crypto_unregister_alg(
 					&dd->pdata->algs_info[i].algs_list[j]);
-	omap_aes_dma_cleanup(dd);
-err_dma:
+	if (!dd->pio_only)
+		omap_aes_dma_cleanup(dd);
+err_irq:
 	tasklet_kill(&dd->done_task);
 	tasklet_kill(&dd->queue_task);
 	pm_runtime_disable(dev);
 err_res:
-	kfree(dd);
 	dd = NULL;
 err_data:
 	dev_err(dev, "initialization failed.\n");
@@ -1221,7 +1276,6 @@ static int omap_aes_remove(struct platform_device *pdev)
 	tasklet_kill(&dd->queue_task);
 	omap_aes_dma_cleanup(dd);
 	pm_runtime_disable(dd->dev);
-	kfree(dd);
 	dd = NULL;

 	return 0;