aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/crypto/mxs-dcp.c
diff options
context:
space:
mode:
authorMarek Vasut <marex@denx.de>2014-01-14 12:31:01 -0500
committerHerbert Xu <herbert@gondor.apana.org.au>2014-02-08 20:59:12 -0500
commit2021abaa00da64a4b98948c93bf31a55386cd2d0 (patch)
treedaacd5d6dc872bdcfde71aafd8fd67c9541aa99e /drivers/crypto/mxs-dcp.c
parent4293242db153512dcfc7e7af9af683e5b97dd4ce (diff)
crypto: dcp - Move the AES operation type from actx to rctx
Move the AES operation type and mode from async crypto context to crypto request context. This allows for recycling of the async crypto context for different kinds of operations. I found this problem when I used dm-crypt, which uses the same async crypto context (actx) for both encryption and decryption requests. Since the requests are enqueued into the processing queue, immediately storing the type of operation into async crypto context (actx) caused corruption of this information when encryption and decryption operations followed immediately one after the other. When the first operation was dequeued, the second operation was already enqueued and had overwritten the type of operation in actx, thus causing an incorrect result for the first operation. Fix this problem by storing the type of operation into the crypto request context. Signed-off-by: Marek Vasut <marex@denx.de> Cc: David S. Miller <davem@davemloft.net> Cc: Fabio Estevam <fabio.estevam@freescale.com> Cc: Shawn Guo <shawn.guo@linaro.org> Cc: Tom Lendacky <thomas.lendacky@amd.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'drivers/crypto/mxs-dcp.c')
-rw-r--r--drivers/crypto/mxs-dcp.c27
1 files changed, 17 insertions, 10 deletions
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index a6db7fa6f891..56bde65ddadf 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -83,13 +83,16 @@ struct dcp_async_ctx {
83 unsigned int hot:1; 83 unsigned int hot:1;
84 84
85 /* Crypto-specific context */ 85 /* Crypto-specific context */
86 unsigned int enc:1;
87 unsigned int ecb:1;
88 struct crypto_ablkcipher *fallback; 86 struct crypto_ablkcipher *fallback;
89 unsigned int key_len; 87 unsigned int key_len;
90 uint8_t key[AES_KEYSIZE_128]; 88 uint8_t key[AES_KEYSIZE_128];
91}; 89};
92 90
91struct dcp_aes_req_ctx {
92 unsigned int enc:1;
93 unsigned int ecb:1;
94};
95
93struct dcp_sha_req_ctx { 96struct dcp_sha_req_ctx {
94 unsigned int init:1; 97 unsigned int init:1;
95 unsigned int fini:1; 98 unsigned int fini:1;
@@ -190,10 +193,12 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
190/* 193/*
191 * Encryption (AES128) 194 * Encryption (AES128)
192 */ 195 */
193static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, int init) 196static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
197 struct ablkcipher_request *req, int init)
194{ 198{
195 struct dcp *sdcp = global_sdcp; 199 struct dcp *sdcp = global_sdcp;
196 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; 200 struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
201 struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
197 int ret; 202 int ret;
198 203
199 dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key, 204 dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
@@ -212,14 +217,14 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, int init)
212 /* Payload contains the key. */ 217 /* Payload contains the key. */
213 desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY; 218 desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
214 219
215 if (actx->enc) 220 if (rctx->enc)
216 desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT; 221 desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
217 if (init) 222 if (init)
218 desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT; 223 desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;
219 224
220 desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128; 225 desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;
221 226
222 if (actx->ecb) 227 if (rctx->ecb)
223 desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB; 228 desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
224 else 229 else
225 desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC; 230 desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
@@ -247,6 +252,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
247 252
248 struct ablkcipher_request *req = ablkcipher_request_cast(arq); 253 struct ablkcipher_request *req = ablkcipher_request_cast(arq);
249 struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm); 254 struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
255 struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
250 256
251 struct scatterlist *dst = req->dst; 257 struct scatterlist *dst = req->dst;
252 struct scatterlist *src = req->src; 258 struct scatterlist *src = req->src;
@@ -271,7 +277,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
271 /* Copy the key from the temporary location. */ 277 /* Copy the key from the temporary location. */
272 memcpy(key, actx->key, actx->key_len); 278 memcpy(key, actx->key, actx->key_len);
273 279
274 if (!actx->ecb) { 280 if (!rctx->ecb) {
275 /* Copy the CBC IV just past the key. */ 281 /* Copy the CBC IV just past the key. */
276 memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128); 282 memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
277 /* CBC needs the INIT set. */ 283 /* CBC needs the INIT set. */
@@ -300,7 +306,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
300 * submit the buffer. 306 * submit the buffer.
301 */ 307 */
302 if (actx->fill == out_off || sg_is_last(src)) { 308 if (actx->fill == out_off || sg_is_last(src)) {
303 ret = mxs_dcp_run_aes(actx, init); 309 ret = mxs_dcp_run_aes(actx, req, init);
304 if (ret) 310 if (ret)
305 return ret; 311 return ret;
306 init = 0; 312 init = 0;
@@ -391,13 +397,14 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
391 struct dcp *sdcp = global_sdcp; 397 struct dcp *sdcp = global_sdcp;
392 struct crypto_async_request *arq = &req->base; 398 struct crypto_async_request *arq = &req->base;
393 struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm); 399 struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
400 struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
394 int ret; 401 int ret;
395 402
396 if (unlikely(actx->key_len != AES_KEYSIZE_128)) 403 if (unlikely(actx->key_len != AES_KEYSIZE_128))
397 return mxs_dcp_block_fallback(req, enc); 404 return mxs_dcp_block_fallback(req, enc);
398 405
399 actx->enc = enc; 406 rctx->enc = enc;
400 actx->ecb = ecb; 407 rctx->ecb = ecb;
401 actx->chan = DCP_CHAN_CRYPTO; 408 actx->chan = DCP_CHAN_CRYPTO;
402 409
403 mutex_lock(&sdcp->mutex[actx->chan]); 410 mutex_lock(&sdcp->mutex[actx->chan]);
@@ -484,7 +491,7 @@ static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
484 return PTR_ERR(blk); 491 return PTR_ERR(blk);
485 492
486 actx->fallback = blk; 493 actx->fallback = blk;
487 tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_async_ctx); 494 tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
488 return 0; 495 return 0;
489} 496}
490 497