author    Tom Lendacky <thomas.lendacky@amd.com>  2014-01-06 14:34:17 -0500
committer Herbert Xu <herbert@gondor.apana.org.au>  2014-01-14 22:33:39 -0500
commit    81a59f000e1d4a60a03081a1fc64aee46d6f0c3e (patch)
tree      95c4c83dc6175c6aa6f286676fbbd6c77419d5b8 /drivers/crypto/ccp/ccp-ops.c
parent    393897c5156a415533ff85aa381458840417b032 (diff)
crypto: ccp - Change data length declarations to u64
When performing a hash operation, if data is already buffered and a request at or near the maximum data length is received, the length calculation could wrap, causing an error in executing the hash operation. Fix this by using a u64 type for the input and output data lengths in all CCP operations.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
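As a minimal, standalone illustration (not code from the driver; the variable names here are hypothetical), the following sketch shows how a 32-bit length sum can wrap when buffered data plus an incoming request approaches the 32-bit maximum, and how widening to 64 bits represents the total exactly:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t buffered = 64;              /* bytes already buffered */
	uint32_t request  = UINT32_MAX - 32; /* request near the maximum length */

	/* 32-bit sum wraps: 64 + (UINT32_MAX - 32) overflows to 31 */
	uint32_t total32 = buffered + request;

	/* widening one operand forces a 64-bit sum, which cannot wrap here */
	uint64_t total64 = (uint64_t)buffered + request;

	printf("u32 total: %u (wrapped)\n", total32);
	printf("u64 total: %llu\n", (unsigned long long)total64);
	return 0;
}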
Diffstat (limited to 'drivers/crypto/ccp/ccp-ops.c')
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c  34
1 file changed, 19 insertions(+), 15 deletions(-)
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 4be091037549..71ed3ade7e12 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -60,9 +60,9 @@ struct ccp_sg_workarea {
 	unsigned int dma_count;
 	enum dma_data_direction dma_dir;
 
-	u32 sg_used;
+	unsigned int sg_used;
 
-	u32 bytes_left;
+	u64 bytes_left;
 };
 
 struct ccp_data {
@@ -466,7 +466,7 @@ static void ccp_sg_free(struct ccp_sg_workarea *wa)
 }
 
 static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
-				struct scatterlist *sg, unsigned int len,
+				struct scatterlist *sg, u64 len,
 				enum dma_data_direction dma_dir)
 {
 	memset(wa, 0, sizeof(*wa));
@@ -499,7 +499,7 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
 
 static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
 {
-	unsigned int nbytes = min(len, wa->bytes_left);
+	unsigned int nbytes = min_t(u64, len, wa->bytes_left);
 
 	if (!wa->sg)
 		return;
@@ -653,7 +653,7 @@ static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
 }
 
 static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
-			 struct scatterlist *sg, unsigned int sg_len,
+			 struct scatterlist *sg, u64 sg_len,
 			 unsigned int dm_len,
 			 enum dma_data_direction dir)
 {
@@ -691,17 +691,20 @@ static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
 	if (!sg_wa->sg)
 		return 0;
 
-	/* Perform the copy operation */
-	nbytes = min(sg_wa->bytes_left, dm_wa->length);
+	/* Perform the copy operation
+	 * nbytes will always be <= UINT_MAX because dm_wa->length is
+	 * an unsigned int
+	 */
+	nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
 	scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
 				 nbytes, from);
 
 	/* Update the structures and generate the count */
 	buf_count = 0;
 	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
-		nbytes = min3(sg_wa->sg->length - sg_wa->sg_used,
-			      dm_wa->length - buf_count,
-			      sg_wa->bytes_left);
+		nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
+			     dm_wa->length - buf_count);
+		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);
 
 		buf_count += nbytes;
 		ccp_update_sg_workarea(sg_wa, nbytes);
@@ -728,14 +731,15 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
 
 	/* The CCP can only DMA from/to one address each per operation. This
 	 * requires that we find the smallest DMA area between the source
-	 * and destination.
+	 * and destination. The resulting len values will always be <= UINT_MAX
+	 * because the dma length is an unsigned int.
 	 */
-	sg_src_len = min(sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used,
-			 src->sg_wa.bytes_left);
+	sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
+	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);
 
 	if (dst) {
-		sg_dst_len = min(sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used,
-				 src->sg_wa.bytes_left);
+		sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
+		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
 		op_len = min(sg_src_len, sg_dst_len);
 	} else
 		op_len = sg_src_len;
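The recurring pattern in the patch is to do the comparison in the u64 domain with min_t(u64, ...) and only then assign back to a narrower variable, which is safe because one operand is itself an unsigned int. A standalone sketch of that idea follows; min_t here is a simplified stand-in for the kernel macro (it evaluates its arguments twice, unlike the real one), and the helper name bytes_for_this_op is hypothetical, not from the driver:

#include <stdint.h>

/* Simplified stand-in for the kernel's min_t(type, a, b): cast both
 * operands to the named type before comparing.
 */
#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

/* Hypothetical helper: how many bytes the next operation may consume. */
static unsigned int bytes_for_this_op(uint64_t bytes_left, unsigned int dm_len)
{
	/* Comparing in the 64-bit domain means a large bytes_left is never
	 * truncated before the comparison; the result is <= dm_len, so the
	 * narrowing conversion back to unsigned int cannot lose data.
	 */
	return (unsigned int)min_t(uint64_t, bytes_left, dm_len);
}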