author	Huang Ying <ying.huang@intel.com>	2009-03-16 13:44:33 -0400
committer	Alasdair G Kergon <agk@redhat.com>	2009-03-16 13:44:33 -0400
commit	b2174eebd1fadb76454dad09a1dacbc17081e6b0 (patch)
tree	a53722388ed012d097cf58a29e484957109c0b7b /drivers/md/dm-crypt.c
parent	d659e6cc98766a1a61d6bdd283f95d149abd7719 (diff)
dm crypt: fix kcryptd_async_done parameter
In the async encryption-complete function (kcryptd_async_done), the
crypto_async_request passed in may be different from the one passed to
crypto_ablkcipher_encrypt/decrypt. Only crypto_async_request->data is
guaranteed to be the same as the one passed in. The current
kcryptd_async_done uses the passed-in crypto_async_request directly,
which may cause the AES-NI-based AES algorithm implementation to panic.
This patch fixes the bug by using only crypto_async_request->data,
which is guaranteed to match the request originally submitted and
points to the dm_crypt_request. The original data (the convert_context)
is then retrieved from that dm_crypt_request.
[mbroz@redhat.com: reworked]
Cc: stable@kernel.org
Signed-off-by: Huang Ying <ying.huang@intel.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
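
The failure mode the message describes can be shown with a minimal
user-space sketch (illustrative only; the struct and function names
below are invented stand-ins, not the kernel's types). The completion
handler must recover its state exclusively through the opaque data
pointer, never through the request object it is handed:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for crypto_async_request: only ->data is guaranteed to
 * match what was set at submission time. */
struct async_request {
	void *data;
};

/* Stand-in for dm-crypt's per-request private state (convert_context). */
struct private_state {
	int sector;
};

/* Like the fixed kcryptd_async_done: trusts only req->data and never
 * assumes 'req' is the object that was originally submitted. */
static void request_done(struct async_request *req, int error)
{
	struct private_state *s = req->data;
	printf("completed: error=%d, sector=%d\n", error, s->sector);
}

int main(void)
{
	struct async_request *orig = malloc(sizeof(*orig));
	struct private_state state = { .sector = 42 };

	orig->data = &state;   /* back-pointer stored at submission time */

	/* A backend (e.g. an accelerated driver) may complete through a
	 * different request object, propagating only the data pointer. */
	struct async_request substitute = { .data = orig->data };
	request_done(&substitute, 0);   /* still finds the right state */

	free(orig);
	return 0;
}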
Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--	drivers/md/dm-crypt.c	26
1 file changed, 21 insertions, 5 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 35bda49796fb..ebab49f8cc1d 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -60,6 +60,7 @@ struct dm_crypt_io {
 };
 
 struct dm_crypt_request {
+	struct convert_context *ctx;
 	struct scatterlist sg_in;
 	struct scatterlist sg_out;
 };
@@ -335,6 +336,18 @@ static void crypt_convert_init(struct crypt_config *cc,
 	init_completion(&ctx->restart);
 }
 
+static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
+					     struct ablkcipher_request *req)
+{
+	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
+}
+
+static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
+					       struct dm_crypt_request *dmreq)
+{
+	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
+}
+
 static int crypt_convert_block(struct crypt_config *cc,
 			       struct convert_context *ctx,
 			       struct ablkcipher_request *req)
@@ -345,10 +358,11 @@ static int crypt_convert_block(struct crypt_config *cc,
 	u8 *iv;
 	int r = 0;
 
-	dmreq = (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
+	dmreq = dmreq_of_req(cc, req);
 	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
 			 crypto_ablkcipher_alignmask(cc->tfm) + 1);
 
+	dmreq->ctx = ctx;
 	sg_init_table(&dmreq->sg_in, 1);
 	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
 		    bv_in->bv_offset + ctx->offset_in);
@@ -395,8 +409,9 @@ static void crypt_alloc_req(struct crypt_config *cc,
 	cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
 	ablkcipher_request_set_tfm(cc->req, cc->tfm);
 	ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 					CRYPTO_TFM_REQ_MAY_SLEEP,
-					kcryptd_async_done, ctx);
+					kcryptd_async_done,
+					dmreq_of_req(cc, cc->req));
 }
 
 /*
@@ -821,7 +836,8 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 static void kcryptd_async_done(struct crypto_async_request *async_req,
 			       int error)
 {
-	struct convert_context *ctx = async_req->data;
+	struct dm_crypt_request *dmreq = async_req->data;
+	struct convert_context *ctx = dmreq->ctx;
 	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
 	struct crypt_config *cc = io->target->private;
 
@@ -830,7 +846,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 		return;
 	}
 
-	mempool_free(ablkcipher_request_cast(async_req), cc->req_pool);
+	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
 
 	if (!atomic_dec_and_test(&ctx->pending))
 		return;
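
The dmreq_of_req()/req_of_dmreq() helpers added above rely on the
request and its private area living in one allocation, with the
dm_crypt_request at a fixed offset (cc->dmreq_start) past the
ablkcipher_request, so the two conversions are exact inverses. A
user-space sketch of that layout, with invented stand-in types rather
than the kernel's:

#include <assert.h>
#include <stdlib.h>

struct fake_request { char payload[64]; }; /* stands in for ablkcipher_request */
struct fake_dmreq   { void *ctx; };        /* stands in for dm_crypt_request */

/* Offset of the private area past the request, like cc->dmreq_start. */
static const size_t dmreq_start = sizeof(struct fake_request);

static struct fake_dmreq *dmreq_of_req(struct fake_request *req)
{
	return (struct fake_dmreq *)((char *)req + dmreq_start);
}

static struct fake_request *req_of_dmreq(struct fake_dmreq *dmreq)
{
	return (struct fake_request *)((char *)dmreq - dmreq_start);
}

int main(void)
{
	/* One allocation holds the request followed by its private area,
	 * as the mempool allocation does in dm-crypt. */
	struct fake_request *req =
		malloc(dmreq_start + sizeof(struct fake_dmreq));

	/* Round-tripping through both helpers returns the original
	 * pointer, which is what lets the completion handler free the
	 * right object from only the private-area pointer. */
	assert(req_of_dmreq(dmreq_of_req(req)) == req);

	free(req);
	return 0;
}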