summaryrefslogtreecommitdiffstats
path: root/drivers/crypto
diff options
context:
space:
mode:
author	Tom Lendacky <thomas.lendacky@amd.com>	2014-01-06 14:34:05 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>	2014-01-14 22:33:36 -0500
commit	77dc4a51a9d3386c318c1622fc126964f7cb0ee2 (patch)
tree	734c859d66bd7665e1bb463814703b49c17e78a0 /drivers/crypto
parent	5258de8af04af0caf129f1ce432d1817a5800887 (diff)
crypto: ccp - Cleanup scatterlist usage
Clean up the usage of scatterlists to make the code cleaner and avoid extra memory allocations when not needed.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto')
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-cmac.c6
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c53
2 files changed, 33 insertions(+), 26 deletions(-)
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 398832c2414a..646c8d1bd03c 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -125,8 +125,10 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
125 sg_init_one(&rctx->pad_sg, rctx->pad, pad_length); 125 sg_init_one(&rctx->pad_sg, rctx->pad, pad_length);
126 sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg); 126 sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg);
127 } 127 }
128 if (sg) 128 if (sg) {
129 sg_mark_end(sg); 129 sg_mark_end(sg);
130 sg = rctx->data_sg.sgl;
131 }
130 132
131 /* Initialize the K1/K2 scatterlist */ 133 /* Initialize the K1/K2 scatterlist */
132 if (final) 134 if (final)
@@ -143,7 +145,7 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
143 rctx->cmd.u.aes.key_len = ctx->u.aes.key_len; 145 rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
144 rctx->cmd.u.aes.iv = &rctx->iv_sg; 146 rctx->cmd.u.aes.iv = &rctx->iv_sg;
145 rctx->cmd.u.aes.iv_len = AES_BLOCK_SIZE; 147 rctx->cmd.u.aes.iv_len = AES_BLOCK_SIZE;
146 rctx->cmd.u.aes.src = (sg) ? rctx->data_sg.sgl : NULL; 148 rctx->cmd.u.aes.src = sg;
147 rctx->cmd.u.aes.src_len = rctx->hash_cnt; 149 rctx->cmd.u.aes.src_len = rctx->hash_cnt;
148 rctx->cmd.u.aes.dst = NULL; 150 rctx->cmd.u.aes.dst = NULL;
149 rctx->cmd.u.aes.cmac_key = cmac_key_sg; 151 rctx->cmd.u.aes.cmac_key = cmac_key_sg;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 0571940cfdf9..bf913cbb3e96 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -122,7 +122,6 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
122 unsigned int final) 122 unsigned int final)
123{ 123{
124 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 124 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
125 struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
126 struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); 125 struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
127 struct scatterlist *sg; 126 struct scatterlist *sg;
128 unsigned int block_size = 127 unsigned int block_size =
@@ -153,35 +152,32 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
153 /* Initialize the context scatterlist */ 152 /* Initialize the context scatterlist */
154 sg_init_one(&rctx->ctx_sg, rctx->ctx, sizeof(rctx->ctx)); 153 sg_init_one(&rctx->ctx_sg, rctx->ctx, sizeof(rctx->ctx));
155 154
156 /* Build the data scatterlist table - allocate enough entries for all
157 * possible data pieces (hmac ipad, buffer, input data)
158 */
159 sg_count = (nbytes) ? sg_nents(req->src) + 2 : 2;
160 gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
161 GFP_KERNEL : GFP_ATOMIC;
162 ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
163 if (ret)
164 return ret;
165
166 sg = NULL; 155 sg = NULL;
167 if (rctx->first && ctx->u.sha.key_len) { 156 if (rctx->buf_count && nbytes) {
168 rctx->hash_cnt += block_size; 157 /* Build the data scatterlist table - allocate enough entries
169 158 * for both data pieces (buffer and input data)
170 sg_init_one(&rctx->pad_sg, ctx->u.sha.ipad, block_size); 159 */
171 sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg); 160 gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
172 } 161 GFP_KERNEL : GFP_ATOMIC;
162 sg_count = sg_nents(req->src) + 1;
163 ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
164 if (ret)
165 return ret;
173 166
174 if (rctx->buf_count) {
175 sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count); 167 sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
176 sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg); 168 sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
177 }
178
179 if (nbytes)
180 sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src); 169 sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
181
182 if (sg)
183 sg_mark_end(sg); 170 sg_mark_end(sg);
184 171
172 sg = rctx->data_sg.sgl;
173 } else if (rctx->buf_count) {
174 sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
175
176 sg = &rctx->buf_sg;
177 } else if (nbytes) {
178 sg = req->src;
179 }
180
185 rctx->msg_bits += (rctx->hash_cnt << 3); /* Total in bits */ 181 rctx->msg_bits += (rctx->hash_cnt << 3); /* Total in bits */
186 182
187 memset(&rctx->cmd, 0, sizeof(rctx->cmd)); 183 memset(&rctx->cmd, 0, sizeof(rctx->cmd));
@@ -190,7 +186,7 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
190 rctx->cmd.u.sha.type = rctx->type; 186 rctx->cmd.u.sha.type = rctx->type;
191 rctx->cmd.u.sha.ctx = &rctx->ctx_sg; 187 rctx->cmd.u.sha.ctx = &rctx->ctx_sg;
192 rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx); 188 rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx);
193 rctx->cmd.u.sha.src = (sg) ? rctx->data_sg.sgl : NULL; 189 rctx->cmd.u.sha.src = sg;
194 rctx->cmd.u.sha.src_len = rctx->hash_cnt; 190 rctx->cmd.u.sha.src_len = rctx->hash_cnt;
195 rctx->cmd.u.sha.final = rctx->final; 191 rctx->cmd.u.sha.final = rctx->final;
196 rctx->cmd.u.sha.msg_bits = rctx->msg_bits; 192 rctx->cmd.u.sha.msg_bits = rctx->msg_bits;
@@ -205,9 +201,12 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
205static int ccp_sha_init(struct ahash_request *req) 201static int ccp_sha_init(struct ahash_request *req)
206{ 202{
207 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 203 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
204 struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
208 struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); 205 struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
209 struct ccp_crypto_ahash_alg *alg = 206 struct ccp_crypto_ahash_alg *alg =
210 ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm)); 207 ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
208 unsigned int block_size =
209 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
211 210
212 memset(rctx, 0, sizeof(*rctx)); 211 memset(rctx, 0, sizeof(*rctx));
213 212
@@ -215,6 +214,12 @@ static int ccp_sha_init(struct ahash_request *req)
215 rctx->type = alg->type; 214 rctx->type = alg->type;
216 rctx->first = 1; 215 rctx->first = 1;
217 216
217 if (ctx->u.sha.key_len) {
218 /* Buffer the HMAC key for first update */
219 memcpy(rctx->buf, ctx->u.sha.ipad, block_size);
220 rctx->buf_count = block_size;
221 }
222
218 return 0; 223 return 0;
219} 224}
220 225