path: root/drivers/md/dm-crypt.c
author     Milan Broz <mbroz@redhat.com>  2008-03-28 17:16:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-03-28 17:45:22 -0400
commit     3f1e9070f63b0eecadfa059959bf7c9dbe835962
tree       1d47de4bdae1009b6efc80824ca882de0a97cfef /drivers/md/dm-crypt.c
parent     ab473a52500528a276920a26cb35b722a868ab6e
dm crypt: fix ctx pending
Fix regression in dm-crypt introduced in commit 3a7f6c990ad04e6f576a159876c602d14d6f7fef ("dm crypt: use async crypto").

If write requests need to be split into pieces, the code must not process them in parallel because the crypto context cannot be shared. So there can be parallel crypto operations on one part of the write, but only one write bio can be processed at a time.

This is not optimal and the workqueue code needs to be optimized for parallel processing, but for now it solves the problem without affecting the performance of synchronous crypto operations (most current dm-crypt users).

http://bugzilla.kernel.org/show_bug.cgi?id=10242
http://bugzilla.kernel.org/show_bug.cgi?id=10207

Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
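Editor's note: the core of the fix below is to treat ctx->pending as a plain completion reference count. It starts at 1 (the submitter's own reference), each submitted block takes another reference, synchronous and error paths drop it immediately, and the references held by blocks that went asynchronous are presumably dropped by the async completion path (not visible in this diff); whoever drops the count to zero finishes the request. A minimal userspace sketch of that counting pattern, using C11 atomics and made-up names (convert_context, crypt_finish and convert_block are illustrative, not dm-crypt's API), might look like this:

/*
 * Illustrative sketch of the completion-counting pattern; not dm-crypt code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct convert_context {
        atomic_int pending;
};

static void crypt_finish(struct convert_context *ctx)
{
        (void)ctx;
        printf("all blocks done, completing the request\n");
}

/* Drop one reference; whoever drops the last one finishes the request. */
static void put_pending(struct convert_context *ctx)
{
        if (atomic_fetch_sub(&ctx->pending, 1) == 1)
                crypt_finish(ctx);
}

/* Pretend to encrypt one block; returns true if it completes asynchronously. */
static bool convert_block(struct convert_context *ctx, int block)
{
        (void)ctx;
        return block & 1;       /* arbitrary: odd blocks "go async" */
}

int main(void)
{
        struct convert_context ctx;

        atomic_init(&ctx.pending, 1);           /* the submitter's own reference */

        for (int block = 0; block < 4; block++) {
                atomic_fetch_add(&ctx.pending, 1);      /* one reference per block */
                if (!convert_block(&ctx, block))
                        put_pending(&ctx);      /* synchronous: drop it right away */
                /* asynchronous blocks keep their reference until completion */
        }

        put_pending(&ctx);      /* submitter's reference; async work may still be pending */

        /* the async completions would normally run elsewhere; simulate them */
        put_pending(&ctx);      /* block 1 completes */
        put_pending(&ctx);      /* block 3 completes: last reference, finishes the request */

        return 0;
}

Because the submitter holds its own reference until after the submission loop, it does not matter whether asynchronous completions fire before or after submission finishes, which is exactly the ordering problem the removed "start pending at 2" scheme tried to paper over.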
Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--  drivers/md/dm-crypt.c  58
1 file changed, 30 insertions(+), 28 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index b04f98df94ea..835def11419d 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
  * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
- * Copyright (C) 2006-2007 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */
@@ -93,6 +93,8 @@ struct crypt_config {
 
         struct workqueue_struct *io_queue;
         struct workqueue_struct *crypt_queue;
+        wait_queue_head_t writeq;
+
         /*
          * crypto related data
          */
@@ -331,14 +333,7 @@ static void crypt_convert_init(struct crypt_config *cc,
         ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
         ctx->sector = sector + cc->iv_offset;
         init_completion(&ctx->restart);
-        /*
-         * Crypto operation can be asynchronous,
-         * ctx->pending is increased after request submission.
-         * We need to ensure that we don't call the crypt finish
-         * operation before pending got incremented
-         * (dependent on crypt submission return code).
-         */
-        atomic_set(&ctx->pending, 2);
+        atomic_set(&ctx->pending, 1);
 }
 
 static int crypt_convert_block(struct crypt_config *cc,
@@ -411,43 +406,42 @@ static void crypt_alloc_req(struct crypt_config *cc,
 static int crypt_convert(struct crypt_config *cc,
                          struct convert_context *ctx)
 {
-        int r = 0;
+        int r;
 
         while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
               ctx->idx_out < ctx->bio_out->bi_vcnt) {
 
                 crypt_alloc_req(cc, ctx);
 
+                atomic_inc(&ctx->pending);
+
                 r = crypt_convert_block(cc, ctx, cc->req);
 
                 switch (r) {
+                /* async */
                 case -EBUSY:
                         wait_for_completion(&ctx->restart);
                         INIT_COMPLETION(ctx->restart);
                         /* fall through*/
                 case -EINPROGRESS:
-                        atomic_inc(&ctx->pending);
                         cc->req = NULL;
-                        r = 0;
-                        /* fall through*/
+                        ctx->sector++;
+                        continue;
+
+                /* sync */
                 case 0:
+                        atomic_dec(&ctx->pending);
                         ctx->sector++;
                         continue;
-                }
 
-                break;
+                /* error */
+                default:
+                        atomic_dec(&ctx->pending);
+                        return r;
+                }
         }
 
-        /*
-         * If there are pending crypto operation run async
-         * code. Otherwise process return code synchronously.
-         * The step of 2 ensures that async finish doesn't
-         * call crypto finish too early.
-         */
-        if (atomic_sub_return(2, &ctx->pending))
-                return -EINPROGRESS;
-
-        return r;
+        return 0;
 }
 
 static void dm_crypt_bio_destructor(struct bio *bio)
@@ -624,8 +618,10 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
 static void kcryptd_io_write(struct dm_crypt_io *io)
 {
         struct bio *clone = io->ctx.bio_out;
+        struct crypt_config *cc = io->target->private;
 
         generic_make_request(clone);
+        wake_up(&cc->writeq);
 }
 
 static void kcryptd_io(struct work_struct *work)
@@ -698,7 +694,8 @@ static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
 
                 r = crypt_convert(cc, &io->ctx);
 
-                if (r != -EINPROGRESS) {
+                if (atomic_dec_and_test(&io->ctx.pending)) {
+                        /* processed, no running async crypto */
                         kcryptd_crypt_write_io_submit(io, r, 0);
                         if (unlikely(r < 0))
                                 return;
@@ -706,8 +703,12 @@ static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
                         atomic_inc(&io->pending);
 
                 /* out of memory -> run queues */
-                if (unlikely(remaining))
+                if (unlikely(remaining)) {
+                        /* wait for async crypto then reinitialize pending */
+                        wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
+                        atomic_set(&io->ctx.pending, 1);
                         congestion_wait(WRITE, HZ/100);
+                }
         }
 }
 
@@ -746,7 +747,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 
         r = crypt_convert(cc, &io->ctx);
 
-        if (r != -EINPROGRESS)
+        if (atomic_dec_and_test(&io->ctx.pending))
                 kcryptd_crypt_read_done(io, r);
 
         crypt_dec_pending(io);
@@ -1047,6 +1048,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                 goto bad_crypt_queue;
         }
 
+        init_waitqueue_head(&cc->writeq);
         ti->private = cc;
         return 0;
 
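
Editor's note: for readers tracing the split-write serialization added above (the new cc->writeq plus the wait_event()/atomic_set() pair in kcryptd_crypt_write_convert_loop()), the sketch below is a stand-alone userspace analogue using pthreads and C11 atomics with made-up names; it is not dm-crypt code, and for simplicity it wakes the waiter from the completion side, whereas the kernel patch issues the wake_up() from kcryptd_io_write() after submitting the clone bio.

/* Illustrative analogue of "wait until pending drops to zero, then re-arm". */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct split_write {
        atomic_int pending;             /* analogue of io->ctx.pending */
        pthread_mutex_t lock;
        pthread_cond_t writeq;          /* stand-in for cc->writeq */
};

/* A finished crypto operation drops one reference; the last one wakes the writer. */
static void op_done(struct split_write *sw)
{
        if (atomic_fetch_sub(&sw->pending, 1) == 1) {
                pthread_mutex_lock(&sw->lock);
                pthread_cond_broadcast(&sw->writeq);
                pthread_mutex_unlock(&sw->lock);
        }
}

/* Writer: block until the previous piece is fully done, then re-arm the counter. */
static void wait_then_rearm(struct split_write *sw)
{
        pthread_mutex_lock(&sw->lock);
        while (atomic_load(&sw->pending))       /* ~ wait_event(cc->writeq, !pending) */
                pthread_cond_wait(&sw->writeq, &sw->lock);
        pthread_mutex_unlock(&sw->lock);
        atomic_store(&sw->pending, 1);          /* ~ atomic_set(&io->ctx.pending, 1) */
}

static void *async_completion(void *arg)
{
        sleep(1);                       /* pretend the crypto engine is busy */
        op_done(arg);                   /* last async op of the first piece finishes */
        return NULL;
}

int main(void)
{
        struct split_write sw = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .writeq = PTHREAD_COND_INITIALIZER,
        };
        pthread_t t;

        atomic_init(&sw.pending, 1);    /* one outstanding async op for piece 1 */
        pthread_create(&t, NULL, async_completion, &sw);

        wait_then_rearm(&sw);           /* piece 2 must not start before piece 1 is done */
        printf("piece 1 complete, starting piece 2 with pending = %d\n",
               atomic_load(&sw.pending));

        pthread_join(t, NULL);
        return 0;
}

The effect matches what the commit message describes: crypto operations within one piece of a write may still complete in parallel, but the next piece of a split write is not started until the previous piece's pending count has returned to zero.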