author     Milan Broz <mbroz@redhat.com>         2007-10-19 17:38:58 -0400
committer  Alasdair G Kergon <agk@redhat.com>    2007-10-19 21:01:14 -0400
commit     cabf08e4d3d1181d7c408edae97fb4d1c31518af
tree       b931cc050b8294a6f04790683e191e324268bad6 /drivers/md
parent     9934a8bea2fc67e6f07d74304eca2a91d251bfe8
dm crypt: add post processing queue
Add post-processing queue (per crypt device) for read operations.
The current implementation uses a single queue for all operations,
which can lead to starvation: many requests sit in the queue waiting
for memory allocation, while the memory-releasing operation that would
unblock them is queued behind them in the same queue. (A minimal
sketch of the resulting two-queue pattern follows the diffstat below.)
Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers/md')
 -rw-r--r--  drivers/md/dm-crypt.c | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 51 insertions(+), 16 deletions(-)
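Before the patch body, here is a minimal, self-contained sketch of the two-queue pattern the commit message describes, using the workqueue API of this kernel era. The names (split_example, example_io, example_crypt) are hypothetical and not part of the patch:

/*
 * Hypothetical illustration of the two-queue split, not dm-crypt code.
 * Stage 1 may block allocating memory; stage 2 releases memory, so it
 * must never be queued behind stage-1 work on the same queue.
 */
#include <linux/errno.h>
#include <linux/workqueue.h>

struct split_example {
        struct workqueue_struct *io_wq;     /* stage 1: submission, may sleep */
        struct workqueue_struct *crypt_wq;  /* stage 2: post-processing, frees memory */
};

static int split_example_init(struct split_example *s)
{
        s->io_wq = create_singlethread_workqueue("example_io");
        if (!s->io_wq)
                return -ENOMEM;

        s->crypt_wq = create_singlethread_workqueue("example_crypt");
        if (!s->crypt_wq) {
                destroy_workqueue(s->io_wq);
                return -ENOMEM;
        }
        return 0;
}

static void split_example_exit(struct split_example *s)
{
        destroy_workqueue(s->io_wq);
        destroy_workqueue(s->crypt_wq);
}

Work queued on the second queue can always make progress and release memory, no matter how many allocation-bound items pile up on the first.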
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 126ed21e6b17..357387fa10ca 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -36,7 +36,6 @@ struct dm_crypt_io {
 	struct work_struct work;
 	atomic_t pending;
 	int error;
-	int post_process;
 };
 
 /*
@@ -80,7 +79,8 @@ struct crypt_config {
 	mempool_t *page_pool;
 	struct bio_set *bs;
 
-	struct workqueue_struct *queue;
+	struct workqueue_struct *io_queue;
+	struct workqueue_struct *crypt_queue;
 	/*
 	 * crypto related data
 	 */
@@ -476,19 +476,36 @@ static void dec_pending(struct dm_crypt_io *io, int error)
 }
 
 /*
- * kcryptd:
+ * kcryptd/kcryptd_io:
  *
  * Needed because it would be very unwise to do decryption in an
  * interrupt context.
+ *
+ * kcryptd performs the actual encryption or decryption.
+ *
+ * kcryptd_io performs the IO submission.
+ *
+ * They must be separated as otherwise the final stages could be
+ * starved by new requests which can block in the first stages due
+ * to memory allocation.
  */
 static void kcryptd_do_work(struct work_struct *work);
+static void kcryptd_do_crypt(struct work_struct *work);
 
 static void kcryptd_queue_io(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
 
 	INIT_WORK(&io->work, kcryptd_do_work);
-	queue_work(cc->queue, &io->work);
+	queue_work(cc->io_queue, &io->work);
+}
+
+static void kcryptd_queue_crypt(struct dm_crypt_io *io)
+{
+	struct crypt_config *cc = io->target->private;
+
+	INIT_WORK(&io->work, kcryptd_do_crypt);
+	queue_work(cc->crypt_queue, &io->work);
 }
 
 static void crypt_endio(struct bio *clone, int error)
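Both queueing helpers reuse the request's single embedded work item, re-initializing it to point at the next stage's handler before requeueing. This is safe only because a request is never pending on both queues at once. A reduced sketch of that hand-off, with hypothetical struct and function names:

#include <linux/workqueue.h>

struct req {
        struct work_struct work;  /* reused by every stage of this request */
};

/* Created elsewhere with create_singlethread_workqueue(). */
static struct workqueue_struct *stage2_wq;

static void stage2(struct work_struct *work)
{
        /* Post-processing for the request runs here, on the second queue. */
}

static void stage1(struct work_struct *work)
{
        struct req *r = container_of(work, struct req, work);

        /*
         * Stage 1 is done and the request is no longer pending on the
         * first queue, so re-pointing the same work item is safe.
         */
        INIT_WORK(&r->work, stage2);
        queue_work(stage2_wq, &r->work);
}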
@@ -511,8 +528,7 @@ static void crypt_endio(struct bio *clone, int error)
 	}
 
 	bio_put(clone);
-	io->post_process = 1;
-	kcryptd_queue_io(io);
+	kcryptd_queue_crypt(io);
 	return;
 
 out:
@@ -634,10 +650,16 @@ static void kcryptd_do_work(struct work_struct *work)
 {
 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
 
-	if (io->post_process)
-		process_read_endio(io);
-	else if (bio_data_dir(io->base_bio) == READ)
+	if (bio_data_dir(io->base_bio) == READ)
 		process_read(io);
+}
+
+static void kcryptd_do_crypt(struct work_struct *work)
+{
+	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+
+	if (bio_data_dir(io->base_bio) == READ)
+		process_read_endio(io);
 	else
 		process_write(io);
 }
@@ -870,16 +892,24 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	} else
 		cc->iv_mode = NULL;
 
-	cc->queue = create_singlethread_workqueue("kcryptd");
-	if (!cc->queue) {
+	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
+	if (!cc->io_queue) {
+		ti->error = "Couldn't create kcryptd io queue";
+		goto bad_io_queue;
+	}
+
+	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
+	if (!cc->crypt_queue) {
 		ti->error = "Couldn't create kcryptd queue";
-		goto bad_queue;
+		goto bad_crypt_queue;
 	}
 
 	ti->private = cc;
 	return 0;
 
-bad_queue:
+bad_crypt_queue:
+	destroy_workqueue(cc->io_queue);
+bad_io_queue:
 	kfree(cc->iv_mode);
 bad_iv_mode:
 	dm_put_device(ti, cc->dev);
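The constructor extends the kernel's usual goto-unwind ladder: each error label releases one resource and falls through to the labels below it, which is why the new bad_crypt_queue label destroys io_queue on its way down. A reduced sketch of the idiom with three resources and hypothetical names:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct three_res {
        void *buf;
        struct workqueue_struct *wq_a;
        struct workqueue_struct *wq_b;
};

static int three_res_init(struct three_res *t)
{
        t->buf = kmalloc(64, GFP_KERNEL);
        if (!t->buf)
                goto bad_buf;

        t->wq_a = create_singlethread_workqueue("example_a");
        if (!t->wq_a)
                goto bad_wq_a;

        t->wq_b = create_singlethread_workqueue("example_b");
        if (!t->wq_b)
                goto bad_wq_b;

        return 0;

bad_wq_b:
        destroy_workqueue(t->wq_a);     /* unwind in reverse order of acquisition */
bad_wq_a:
        kfree(t->buf);
bad_buf:
        return -ENOMEM;
}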
@@ -905,7 +935,8 @@ static void crypt_dtr(struct dm_target *ti)
 {
 	struct crypt_config *cc = (struct crypt_config *) ti->private;
 
-	destroy_workqueue(cc->queue);
+	destroy_workqueue(cc->io_queue);
+	destroy_workqueue(cc->crypt_queue);
 
 	bioset_free(cc->bs);
 	mempool_destroy(cc->page_pool);
@@ -931,9 +962,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 	io = mempool_alloc(cc->io_pool, GFP_NOIO);
 	io->target = ti;
 	io->base_bio = bio;
-	io->error = io->post_process = 0;
+	io->error = 0;
 	atomic_set(&io->pending, 0);
-	kcryptd_queue_io(io);
+
+	if (bio_data_dir(io->base_bio) == READ)
+		kcryptd_queue_io(io);
+	else
+		kcryptd_queue_crypt(io);
 
 	return DM_MAPIO_SUBMITTED;
 }
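Taken together, the dispatch in crypt_map() gives each I/O direction a fixed pipeline across the two queues. Schematically (the function names are from this patch; the flow annotation is ours):

READ:   crypt_map()
          -> io_queue:    kcryptd_do_work()  -> process_read()        /* clone + submit */
          ... device completes, crypt_endio() fires ...
          -> crypt_queue: kcryptd_do_crypt() -> process_read_endio()  /* decrypt */

WRITE:  crypt_map()
          -> crypt_queue: kcryptd_do_crypt() -> process_write()       /* encrypt + submit */

Reads start on io_queue because cloning the bio and allocating its pages may block; writes perform allocation, encryption, and submission in a single stage, so they go straight to crypt_queue.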