diff options
author | Ben Collins <ben.c@servergy.com> | 2015-04-03 12:09:46 -0400 |
---|---|---|
committer | Mike Snitzer <snitzer@redhat.com> | 2015-04-15 12:10:26 -0400 |
commit | 0618764cb25f6fa9fb31152995de42a8a0496475 (patch) | |
tree | 16e5a37e6f584465d05c8df160a81e35609182a8 | |
parent | 5977907937afa2b5584a874d44ba6c0f56aeaa9c (diff) |
dm crypt: fix deadlock when async crypto algorithm returns -EBUSY
I suspect this doesn't show up for most anyone because software
algorithms typically don't have a sense of being too busy. However,
when working with the Freescale CAAM driver it will return -EBUSY on
occasion under heavy load -- which resulted in dm-crypt deadlock.
After checking the logic in some other drivers, the scheme for
crypt_convert() and its callback, kcryptd_async_done(), was not
correctly laid out to properly handle -EBUSY or -EINPROGRESS.
Fix this by using the completion for both -EBUSY and -EINPROGRESS. Now
crypt_convert()'s use of completion is comparable to
af_alg_wait_for_completion(). Similarly, kcryptd_async_done() follows
the pattern used in af_alg_complete().
Before this fix dm-crypt would lockup within 1-2 minutes running with
the CAAM driver. Fix was regression tested against software algorithms
on PPC32 and x86_64, and things seem perfectly happy there as well.
Signed-off-by: Ben Collins <ben.c@servergy.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@vger.kernel.org
-rw-r--r-- | drivers/md/dm-crypt.c | 12 |
1 files changed, 6 insertions, 6 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index aa1238facbeb..9b5e1eb0ffcf 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -925,11 +925,10 @@ static int crypt_convert(struct crypt_config *cc, | |||
925 | 925 | ||
926 | switch (r) { | 926 | switch (r) { |
927 | /* async */ | 927 | /* async */ |
928 | case -EINPROGRESS: | ||
928 | case -EBUSY: | 929 | case -EBUSY: |
929 | wait_for_completion(&ctx->restart); | 930 | wait_for_completion(&ctx->restart); |
930 | reinit_completion(&ctx->restart); | 931 | reinit_completion(&ctx->restart); |
931 | /* fall through*/ | ||
932 | case -EINPROGRESS: | ||
933 | ctx->req = NULL; | 932 | ctx->req = NULL; |
934 | ctx->cc_sector++; | 933 | ctx->cc_sector++; |
935 | continue; | 934 | continue; |
@@ -1346,10 +1345,8 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, | |||
1346 | struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); | 1345 | struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); |
1347 | struct crypt_config *cc = io->cc; | 1346 | struct crypt_config *cc = io->cc; |
1348 | 1347 | ||
1349 | if (error == -EINPROGRESS) { | 1348 | if (error == -EINPROGRESS) |
1350 | complete(&ctx->restart); | ||
1351 | return; | 1349 | return; |
1352 | } | ||
1353 | 1350 | ||
1354 | if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post) | 1351 | if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post) |
1355 | error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq); | 1352 | error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq); |
@@ -1360,12 +1357,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, | |||
1360 | crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio); | 1357 | crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio); |
1361 | 1358 | ||
1362 | if (!atomic_dec_and_test(&ctx->cc_pending)) | 1359 | if (!atomic_dec_and_test(&ctx->cc_pending)) |
1363 | return; | 1360 | goto done; |
1364 | 1361 | ||
1365 | if (bio_data_dir(io->base_bio) == READ) | 1362 | if (bio_data_dir(io->base_bio) == READ) |
1366 | kcryptd_crypt_read_done(io); | 1363 | kcryptd_crypt_read_done(io); |
1367 | else | 1364 | else |
1368 | kcryptd_crypt_write_io_submit(io, 1); | 1365 | kcryptd_crypt_write_io_submit(io, 1); |
1366 | done: | ||
1367 | if (!completion_done(&ctx->restart)) | ||
1368 | complete(&ctx->restart); | ||
1369 | } | 1369 | } |
1370 | 1370 | ||
1371 | static void kcryptd_crypt(struct work_struct *work) | 1371 | static void kcryptd_crypt(struct work_struct *work) |