Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/omap-aes.c  86
1 file changed, 83 insertions(+), 3 deletions(-)
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 7a08a152838a..2fd22ca6a58f 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -158,9 +158,23 @@ struct omap_aes_dev {
 	struct tasklet_struct	queue_task;
 
 	struct ablkcipher_request	*req;
+
+	/*
+	 * total is used by PIO mode for book keeping so introduce
+	 * variable total_save as need it to calc page_order
+	 */
 	size_t			total;
+	size_t			total_save;
+
 	struct scatterlist	*in_sg;
 	struct scatterlist	*out_sg;
+
+	/* Buffers for copying for unaligned cases */
+	struct scatterlist	in_sgl;
+	struct scatterlist	out_sgl;
+	struct scatterlist	*orig_out;
+	int			sgs_copied;
+
 	struct scatter_walk	in_walk;
 	struct scatter_walk	out_walk;
 	int			dma_in;
@@ -537,12 +551,51 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
 	dmaengine_terminate_all(dd->dma_lch_in);
 	dmaengine_terminate_all(dd->dma_lch_out);
 
-	dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
-	dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
-
 	return err;
 }
 
+int omap_aes_check_aligned(struct scatterlist *sg)
+{
+	while (sg) {
+		if (!IS_ALIGNED(sg->offset, 4))
+			return -1;
+		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
+			return -1;
+		sg = sg_next(sg);
+	}
+	return 0;
+}
+
+int omap_aes_copy_sgs(struct omap_aes_dev *dd)
+{
+	void *buf_in, *buf_out;
+	int pages;
+
+	pages = get_order(dd->total);
+
+	buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
+	buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
+
+	if (!buf_in || !buf_out) {
+		pr_err("Couldn't allocated pages for unaligned cases.\n");
+		return -1;
+	}
+
+	dd->orig_out = dd->out_sg;
+
+	sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);
+
+	sg_init_table(&dd->in_sgl, 1);
+	sg_set_buf(&dd->in_sgl, buf_in, dd->total);
+	dd->in_sg = &dd->in_sgl;
+
+	sg_init_table(&dd->out_sgl, 1);
+	sg_set_buf(&dd->out_sgl, buf_out, dd->total);
+	dd->out_sg = &dd->out_sgl;
+
+	return 0;
+}
+
 static int omap_aes_handle_queue(struct omap_aes_dev *dd,
 				 struct ablkcipher_request *req)
 {
@@ -576,9 +629,19 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
 	/* assign new request to device */
 	dd->req = req;
 	dd->total = req->nbytes;
+	dd->total_save = req->nbytes;
 	dd->in_sg = req->src;
 	dd->out_sg = req->dst;
 
+	if (omap_aes_check_aligned(dd->in_sg) ||
+	    omap_aes_check_aligned(dd->out_sg)) {
+		if (omap_aes_copy_sgs(dd))
+			pr_err("Failed to copy SGs for unaligned cases\n");
+		dd->sgs_copied = 1;
+	} else {
+		dd->sgs_copied = 0;
+	}
+
 	dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total);
 	dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total);
 	BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);
@@ -606,14 +669,31 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
 static void omap_aes_done_task(unsigned long data)
 {
 	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
+	void *buf_in, *buf_out;
+	int pages;
 
 	pr_debug("enter done_task\n");
 
 	if (!dd->pio_only) {
 		dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
 				       DMA_FROM_DEVICE);
+		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
+		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+			     DMA_FROM_DEVICE);
 		omap_aes_crypt_dma_stop(dd);
 	}
 
+	if (dd->sgs_copied) {
+		buf_in = sg_virt(&dd->in_sgl);
+		buf_out = sg_virt(&dd->out_sgl);
+
+		sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);
+
+		pages = get_order(dd->total_save);
+		free_pages((unsigned long)buf_in, pages);
+		free_pages((unsigned long)buf_out, pages);
+	}
+
 	omap_aes_finish_req(dd, 0);
 	omap_aes_handle_queue(dd, NULL);
 
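The patch amounts to a bounce-buffer fallback: when either scatterlist has an entry whose offset is not 32-bit aligned or whose length is not a multiple of AES_BLOCK_SIZE, the whole request is copied into freshly allocated contiguous pages before DMA, and the result is copied back to the original destination in the done task. Below is a minimal sketch of that idea only, not the driver's code: it uses the generic sg_copy_to_buffer()/sg_copy_from_buffer() helpers in place of the driver-local sg_copy_buf(), and the bounce_* names are invented for illustration.

/*
 * Illustrative sketch only -- not taken from omap-aes.c. It assumes one
 * contiguous bounce buffer per direction is acceptable and relies on the
 * generic scatterlist copy helpers from <linux/scatterlist.h>.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <crypto/aes.h>

/* Same test as omap_aes_check_aligned(): word-aligned offsets, block-sized lengths. */
static bool bounce_needed(struct scatterlist *sg)
{
	for (; sg; sg = sg_next(sg)) {
		if (!IS_ALIGNED(sg->offset, 4) ||
		    !IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return true;
	}
	return false;
}

/* Gather an unaligned source list into one aligned buffer wrapped in a 1-entry sgl. */
static void *bounce_in(struct scatterlist *src, unsigned int nents,
		       size_t total, struct scatterlist *one_sgl)
{
	int pages = get_order(total);
	void *buf = (void *)__get_free_pages(GFP_ATOMIC, pages);

	if (!buf)
		return NULL;

	sg_copy_to_buffer(src, nents, buf, total);
	sg_init_table(one_sgl, 1);
	sg_set_buf(one_sgl, buf, total);
	return buf;
}

/* On completion: scatter the result back to the caller's list and free the pages. */
static void bounce_out(struct scatterlist *orig_dst, unsigned int nents,
		       void *buf, size_t total)
{
	sg_copy_from_buffer(orig_dst, nents, buf, total);
	free_pages((unsigned long)buf, get_order(total));
}

A real driver, as in omap_aes_copy_sgs() above, also allocates a second bounce buffer for the output side and remembers the original destination scatterlist (dd->orig_out) so the ciphertext can be copied back once the done task runs.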