author     Tadeusz Struk <tadeusz.struk@intel.com>   2014-12-08 15:05:42 -0500
committer  Herbert Xu <herbert@gondor.apana.org.au>  2014-12-22 07:02:41 -0500
commit     82f82504b8f5f1013678bbc74e0882891114594a (patch)
tree       115bb17ace856e6a9e3bd5d647bb5929618664ab /drivers/crypto
parent     0f477b655a524515ec9a263d70d51f460c05a161 (diff)
crypto: qat - Fix assumption that sg in and out will have the same nents
Fixed the invalid assumption that the sgl in and sgl out will always have the same number of entries.

Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
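To illustrate why this matters, below is a minimal standalone C sketch (not QAT driver code; struct fake_sg and count_valid are hypothetical stand-ins for struct scatterlist and the driver's counting logic): the src and dst scatterlists can have different entry counts, so each descriptor list must be sized and counted from its own list, skipping zero-length entries, rather than reusing the input's count for the output.

/*
 * Standalone sketch of the sizing rule from the commit message.
 * The types and helper below are hypothetical stand-ins, not kernel code.
 */
#include <stdio.h>
#include <stddef.h>

struct fake_sg { size_t length; };	/* stand-in for struct scatterlist */

/* Count only entries that actually carry data, each list on its own. */
static size_t count_valid(const struct fake_sg *sg, size_t nents)
{
	size_t n = 0;

	for (size_t i = 0; i < nents; i++)
		if (sg[i].length)
			n++;
	return n;
}

int main(void)
{
	struct fake_sg in[]  = { { 4096 }, { 4096 }, { 0 } };	/* 3 entries, 2 usable */
	struct fake_sg out[] = { { 8192 } };			/* 1 entry */
	size_t n_in  = count_valid(in, 3);
	size_t n_out = count_valid(out, 1);

	/* Size each descriptor list from its own count, never from the other's. */
	printf("in:  %zu descriptors\n", n_in);		/* prints 2 */
	printf("out: %zu descriptors\n", n_out);	/* prints 1 */
	return 0;
}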
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c    | 82
-rw-r--r--  drivers/crypto/qat/qat_common/qat_crypto.h  |  1
2 files changed, 50 insertions(+), 33 deletions(-)
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 19eea1c832ac..e4e32d872902 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -557,7 +557,8 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
 	dma_addr_t blp = qat_req->buf.blp;
 	dma_addr_t blpout = qat_req->buf.bloutp;
 	size_t sz = qat_req->buf.sz;
-	int i, bufs = bl->num_bufs;
+	size_t sz_out = qat_req->buf.sz_out;
+	int i;
 
 	for (i = 0; i < bl->num_bufs; i++)
 		dma_unmap_single(dev, bl->bufers[i].addr,
@@ -567,14 +568,14 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
 	kfree(bl);
 	if (blp != blpout) {
 		/* If out of place operation dma unmap only data */
-		int bufless = bufs - blout->num_mapped_bufs;
+		int bufless = blout->num_bufs - blout->num_mapped_bufs;
 
-		for (i = bufless; i < bufs; i++) {
+		for (i = bufless; i < blout->num_bufs; i++) {
 			dma_unmap_single(dev, blout->bufers[i].addr,
 					 blout->bufers[i].len,
 					 DMA_BIDIRECTIONAL);
 		}
-		dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE);
+		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
 		kfree(blout);
 	}
 }
@@ -587,19 +588,20 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 			       struct qat_crypto_request *qat_req)
 {
 	struct device *dev = &GET_DEV(inst->accel_dev);
-	int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
+	int i, bufs = 0, sg_nctr = 0;
+	int n = sg_nents(sgl), assoc_n = sg_nents(assoc);
 	struct qat_alg_buf_list *bufl;
 	struct qat_alg_buf_list *buflout = NULL;
 	dma_addr_t blp;
 	dma_addr_t bloutp = 0;
 	struct scatterlist *sg;
-	size_t sz = sizeof(struct qat_alg_buf_list) +
+	size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
 			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
 
 	if (unlikely(!n))
 		return -EINVAL;
 
-	bufl = kmalloc_node(sz, GFP_ATOMIC,
+	bufl = kzalloc_node(sz, GFP_ATOMIC,
 			    dev_to_node(&GET_DEV(inst->accel_dev)));
 	if (unlikely(!bufl))
 		return -ENOMEM;
@@ -620,15 +622,20 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 			goto err;
 		bufs++;
 	}
-	bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
-						 DMA_BIDIRECTIONAL);
-	bufl->bufers[bufs].len = ivlen;
-	if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
-		goto err;
-	bufs++;
+	if (ivlen) {
+		bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
+							 DMA_BIDIRECTIONAL);
+		bufl->bufers[bufs].len = ivlen;
+		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
+			goto err;
+		bufs++;
+	}
 
 	for_each_sg(sgl, sg, n, i) {
-		int y = i + bufs;
+		int y = sg_nctr + bufs;
+
+		if (!sg->length)
+			continue;
 
 		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
 						      sg->length,
@@ -636,8 +643,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 		bufl->bufers[y].len = sg->length;
 		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
 			goto err;
+		sg_nctr++;
 	}
-	bufl->num_bufs = n + bufs;
+	bufl->num_bufs = sg_nctr + bufs;
 	qat_req->buf.bl = bufl;
 	qat_req->buf.blp = blp;
 	qat_req->buf.sz = sz;
@@ -645,11 +653,15 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 	if (sgl != sglout) {
 		struct qat_alg_buf *bufers;
 
-		buflout = kmalloc_node(sz, GFP_ATOMIC,
+		n = sg_nents(sglout);
+		sz_out = sizeof(struct qat_alg_buf_list) +
+			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
+		sg_nctr = 0;
+		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
 				       dev_to_node(&GET_DEV(inst->accel_dev)));
 		if (unlikely(!buflout))
 			goto err;
-		bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
+		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(dev, bloutp)))
 			goto err;
 		bufers = buflout->bufers;
@@ -660,47 +672,51 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 			bufers[i].addr = bufl->bufers[i].addr;
 		}
 		for_each_sg(sglout, sg, n, i) {
-			int y = i + bufs;
+			int y = sg_nctr + bufs;
+
+			if (!sg->length)
+				continue;
 
 			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
 							sg->length,
 							DMA_BIDIRECTIONAL);
-			buflout->bufers[y].len = sg->length;
 			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
 				goto err;
+			bufers[y].len = sg->length;
+			sg_nctr++;
 		}
-		buflout->num_bufs = n + bufs;
-		buflout->num_mapped_bufs = n;
+		buflout->num_bufs = sg_nctr + bufs;
+		buflout->num_mapped_bufs = sg_nctr;
 		qat_req->buf.blout = buflout;
 		qat_req->buf.bloutp = bloutp;
+		qat_req->buf.sz_out = sz_out;
 	} else {
 		/* Otherwise set the src and dst to the same address */
 		qat_req->buf.bloutp = qat_req->buf.blp;
+		qat_req->buf.sz_out = 0;
 	}
 	return 0;
 err:
 	dev_err(dev, "Failed to map buf for dma\n");
-	for_each_sg(sgl, sg, n + bufs, i) {
-		if (!dma_mapping_error(dev, bufl->bufers[i].addr)) {
+	sg_nctr = 0;
+	for (i = 0; i < n + bufs; i++)
+		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
 			dma_unmap_single(dev, bufl->bufers[i].addr,
 					 bufl->bufers[i].len,
 					 DMA_BIDIRECTIONAL);
-		}
-	}
+
 	if (!dma_mapping_error(dev, blp))
 		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
 	kfree(bufl);
 	if (sgl != sglout && buflout) {
-		for_each_sg(sglout, sg, n, i) {
-			int y = i + bufs;
-
-			if (!dma_mapping_error(dev, buflout->bufers[y].addr))
-				dma_unmap_single(dev, buflout->bufers[y].addr,
-						 buflout->bufers[y].len,
+		n = sg_nents(sglout);
+		for (i = bufs; i < n + bufs; i++)
+			if (!dma_mapping_error(dev, buflout->bufers[i].addr))
+				dma_unmap_single(dev, buflout->bufers[i].addr,
+						 buflout->bufers[i].len,
 						 DMA_BIDIRECTIONAL);
-		}
 		if (!dma_mapping_error(dev, bloutp))
-			dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE);
+			dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
 		kfree(buflout);
 	}
 	return -ENOMEM;
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
index ab8468d11ddb..fcb323116e60 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -72,6 +72,7 @@ struct qat_crypto_request_buffs {
 	struct qat_alg_buf_list *blout;
 	dma_addr_t bloutp;
 	size_t sz;
+	size_t sz_out;
 };
 
 struct qat_crypto_request {