author    Tom Lendacky <thomas.lendacky@amd.com>    2014-01-06 14:34:17 -0500
committer Herbert Xu <herbert@gondor.apana.org.au>  2014-01-14 22:33:39 -0500
commit    81a59f000e1d4a60a03081a1fc64aee46d6f0c3e (patch)
tree      95c4c83dc6175c6aa6f286676fbbd6c77419d5b8 /drivers/crypto
parent    393897c5156a415533ff85aa381458840417b032 (diff)
crypto: ccp - Change data length declarations to u64
When performing a hash operation, if data is already buffered and a
request at or near the maximum data length is received, the length
calculation could wrap, causing an error in executing the hash
operation. Fix this by using a u64 type for the input and output data
lengths in all CCP operations.
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
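
For illustration only, here is a minimal standalone C sketch (not part of the
patch or the driver) of the arithmetic being fixed: summing the buffered byte
count and a request length near UINT_MAX in an unsigned int wraps around,
while the same sum widened to a u64, as the patch does, keeps the true value.
The names buf_count and nbytes merely mirror those used in the patch.

/* Minimal sketch of the wrap the patch guards against; not driver code. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int buf_count = 64;           /* bytes already buffered */
	unsigned int nbytes = UINT32_MAX - 16; /* request near the maximum length */

	/* The 32-bit sum wraps around (here to 47) ... */
	unsigned int wrapped = buf_count + nbytes;

	/* ... while the widened 64-bit sum keeps the correct value. */
	uint64_t widened = (uint64_t)buf_count + (uint64_t)nbytes;

	printf("wrapped: %u, widened: %llu\n",
	       wrapped, (unsigned long long)widened);
	return 0;
}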
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-cmac.c  21
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c       21
-rw-r--r--  drivers/crypto/ccp/ccp-crypto.h           10
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c              34
4 files changed, 53 insertions, 33 deletions
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index c6b8f9e56aab..a52b97a4c843 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -37,8 +37,9 @@ static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
 
 	if (rctx->hash_rem) {
 		/* Save remaining data to buffer */
-		scatterwalk_map_and_copy(rctx->buf, rctx->cmd.u.aes.src,
-					 rctx->hash_cnt, rctx->hash_rem, 0);
+		unsigned int offset = rctx->nbytes - rctx->hash_rem;
+		scatterwalk_map_and_copy(rctx->buf, rctx->src,
+					 offset, rctx->hash_rem, 0);
 		rctx->buf_count = rctx->hash_rem;
 	} else
 		rctx->buf_count = 0;
@@ -62,8 +63,9 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
 	struct scatterlist *sg, *cmac_key_sg = NULL;
 	unsigned int block_size =
 		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
-	unsigned int len, need_pad, sg_count;
+	unsigned int need_pad, sg_count;
 	gfp_t gfp;
+	u64 len;
 	int ret;
 
 	if (!ctx->u.aes.key_len)
@@ -72,7 +74,9 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
 	if (nbytes)
 		rctx->null_msg = 0;
 
-	if (!final && ((nbytes + rctx->buf_count) <= block_size)) {
+	len = (u64)rctx->buf_count + (u64)nbytes;
+
+	if (!final && (len <= block_size)) {
 		scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
 					 0, nbytes, 0);
 		rctx->buf_count += nbytes;
@@ -80,12 +84,13 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
 		return 0;
 	}
 
-	len = rctx->buf_count + nbytes;
+	rctx->src = req->src;
+	rctx->nbytes = nbytes;
 
 	rctx->final = final;
-	rctx->hash_cnt = final ? len : len & ~(block_size - 1);
-	rctx->hash_rem = final ? 0 : len & (block_size - 1);
-	if (!final && (rctx->hash_cnt == len)) {
+	rctx->hash_rem = final ? 0 : len & (block_size - 1);
+	rctx->hash_cnt = len - rctx->hash_rem;
+	if (!final && !rctx->hash_rem) {
 		/* CCP can't do zero length final, so keep some data around */
 		rctx->hash_cnt -= block_size;
 		rctx->hash_rem = block_size;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 183d16e46d20..d30f6c893ffb 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -101,8 +101,9 @@ static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
 
 	if (rctx->hash_rem) {
 		/* Save remaining data to buffer */
-		scatterwalk_map_and_copy(rctx->buf, rctx->cmd.u.sha.src,
-					 rctx->hash_cnt, rctx->hash_rem, 0);
+		unsigned int offset = rctx->nbytes - rctx->hash_rem;
+		scatterwalk_map_and_copy(rctx->buf, rctx->src,
+					 offset, rctx->hash_rem, 0);
 		rctx->buf_count = rctx->hash_rem;
 	} else
 		rctx->buf_count = 0;
@@ -129,11 +130,14 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
 	struct scatterlist *sg;
 	unsigned int block_size =
 		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
-	unsigned int len, sg_count;
+	unsigned int sg_count;
 	gfp_t gfp;
+	u64 len;
 	int ret;
 
-	if (!final && ((nbytes + rctx->buf_count) <= block_size)) {
+	len = (u64)rctx->buf_count + (u64)nbytes;
+
+	if (!final && (len <= block_size)) {
 		scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
 					 0, nbytes, 0);
 		rctx->buf_count += nbytes;
@@ -141,12 +145,13 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
 		return 0;
 	}
 
-	len = rctx->buf_count + nbytes;
+	rctx->src = req->src;
+	rctx->nbytes = nbytes;
 
 	rctx->final = final;
-	rctx->hash_cnt = final ? len : len & ~(block_size - 1);
-	rctx->hash_rem = final ? 0 : len & (block_size - 1);
-	if (!final && (rctx->hash_cnt == len)) {
+	rctx->hash_rem = final ? 0 : len & (block_size - 1);
+	rctx->hash_cnt = len - rctx->hash_rem;
+	if (!final && !rctx->hash_rem) {
 		/* CCP can't do zero length final, so keep some data around */
 		rctx->hash_cnt -= block_size;
 		rctx->hash_rem = block_size;
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 13ea6ea4b45d..b222231b6169 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -110,7 +110,10 @@ struct ccp_aes_cmac_req_ctx {
 	unsigned int null_msg;
 	unsigned int final;
 
-	unsigned int hash_cnt;
+	struct scatterlist *src;
+	unsigned int nbytes;
+
+	u64 hash_cnt;
 	unsigned int hash_rem;
 
 	struct sg_table data_sg;
@@ -149,7 +152,10 @@ struct ccp_sha_req_ctx {
 	unsigned int first;
 	unsigned int final;
 
-	unsigned int hash_cnt;
+	struct scatterlist *src;
+	unsigned int nbytes;
+
+	u64 hash_cnt;
 	unsigned int hash_rem;
 
 	struct sg_table data_sg;
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 4be091037549..71ed3ade7e12 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -60,9 +60,9 @@ struct ccp_sg_workarea {
 	unsigned int dma_count;
 	enum dma_data_direction dma_dir;
 
-	u32 sg_used;
+	unsigned int sg_used;
 
-	u32 bytes_left;
+	u64 bytes_left;
 };
 
 struct ccp_data {
@@ -466,7 +466,7 @@ static void ccp_sg_free(struct ccp_sg_workarea *wa)
 }
 
 static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
-				struct scatterlist *sg, unsigned int len,
+				struct scatterlist *sg, u64 len,
 				enum dma_data_direction dma_dir)
 {
 	memset(wa, 0, sizeof(*wa));
@@ -499,7 +499,7 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
 
 static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
 {
-	unsigned int nbytes = min(len, wa->bytes_left);
+	unsigned int nbytes = min_t(u64, len, wa->bytes_left);
 
 	if (!wa->sg)
 		return;
@@ -653,7 +653,7 @@ static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
 }
 
 static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
-			 struct scatterlist *sg, unsigned int sg_len,
+			 struct scatterlist *sg, u64 sg_len,
 			 unsigned int dm_len,
 			 enum dma_data_direction dir)
 {
@@ -691,17 +691,20 @@ static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
 	if (!sg_wa->sg)
 		return 0;
 
-	/* Perform the copy operation */
-	nbytes = min(sg_wa->bytes_left, dm_wa->length);
+	/* Perform the copy operation
+	 * nbytes will always be <= UINT_MAX because dm_wa->length is
+	 * an unsigned int
+	 */
+	nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
 	scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
 				 nbytes, from);
 
 	/* Update the structures and generate the count */
 	buf_count = 0;
 	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
-		nbytes = min3(sg_wa->sg->length - sg_wa->sg_used,
-			      dm_wa->length - buf_count,
-			      sg_wa->bytes_left);
+		nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
+			     dm_wa->length - buf_count);
+		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);
 
 		buf_count += nbytes;
 		ccp_update_sg_workarea(sg_wa, nbytes);
@@ -728,14 +731,15 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
 
 	/* The CCP can only DMA from/to one address each per operation. This
 	 * requires that we find the smallest DMA area between the source
-	 * and destination.
+	 * and destination. The resulting len values will always be <= UINT_MAX
+	 * because the dma length is an unsigned int.
 	 */
-	sg_src_len = min(sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used,
-			 src->sg_wa.bytes_left);
+	sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
+	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);
 
 	if (dst) {
-		sg_dst_len = min(sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used,
-				 src->sg_wa.bytes_left);
+		sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
+		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
 		op_len = min(sg_src_len, sg_dst_len);
 	} else
 		op_len = sg_src_len;