Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--	drivers/md/dm-crypt.c	54
1 file changed, 26 insertions(+), 28 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 5b83204b6594..ccc2fe19db86 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -110,6 +110,7 @@ struct crypt_config {
 static struct kmem_cache *_crypt_io_pool;
 
 static void clone_init(struct dm_crypt_io *, struct bio *);
+static void kcryptd_queue_crypt(struct dm_crypt_io *io);
 
 /*
  * Different IV generation algorithms:
@@ -481,25 +482,6 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
  * starved by new requests which can block in the first stages due
  * to memory allocation.
  */
-static void kcryptd_io(struct work_struct *work);
-static void kcryptd_crypt(struct work_struct *work);
-
-static void kcryptd_queue_io(struct dm_crypt_io *io)
-{
-	struct crypt_config *cc = io->target->private;
-
-	INIT_WORK(&io->work, kcryptd_io);
-	queue_work(cc->io_queue, &io->work);
-}
-
-static void kcryptd_queue_crypt(struct dm_crypt_io *io)
-{
-	struct crypt_config *cc = io->target->private;
-
-	INIT_WORK(&io->work, kcryptd_crypt);
-	queue_work(cc->crypt_queue, &io->work);
-}
-
 static void crypt_endio(struct bio *clone, int error)
 {
 	struct dm_crypt_io *io = clone->bi_private;
@@ -575,6 +557,24 @@ static void kcryptd_io_write(struct dm_crypt_io *io)
 {
 }
 
+static void kcryptd_io(struct work_struct *work)
+{
+	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+
+	if (bio_data_dir(io->base_bio) == READ)
+		kcryptd_io_read(io);
+	else
+		kcryptd_io_write(io);
+}
+
+static void kcryptd_queue_io(struct dm_crypt_io *io)
+{
+	struct crypt_config *cc = io->target->private;
+
+	INIT_WORK(&io->work, kcryptd_io);
+	queue_work(cc->io_queue, &io->work);
+}
+
 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int error)
 {
 }
@@ -658,24 +658,22 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 	kcryptd_crypt_read_done(io, r);
 }
 
-static void kcryptd_io(struct work_struct *work)
+static void kcryptd_crypt(struct work_struct *work)
 {
 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
 
 	if (bio_data_dir(io->base_bio) == READ)
-		kcryptd_io_read(io);
+		kcryptd_crypt_read_convert(io);
 	else
-		kcryptd_io_write(io);
+		kcryptd_crypt_write_convert(io);
 }
 
-static void kcryptd_crypt(struct work_struct *work)
+static void kcryptd_queue_crypt(struct dm_crypt_io *io)
 {
-	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+	struct crypt_config *cc = io->target->private;
 
-	if (bio_data_dir(io->base_bio) == READ)
-		kcryptd_crypt_read_convert(io);
-	else
-		kcryptd_crypt_write_convert(io);
+	INIT_WORK(&io->work, kcryptd_crypt);
+	queue_work(cc->crypt_queue, &io->work);
 }
 
 /*
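
The patch above reorders the kcryptd helpers so that each queue wrapper (kcryptd_queue_io(), kcryptd_queue_crypt()) is defined directly after the work function it dispatches (kcryptd_io(), kcryptd_crypt()), leaving only a single forward declaration of kcryptd_queue_crypt(). The mechanism being shuffled is the standard Linux workqueue pattern: a struct work_struct embedded in struct dm_crypt_io, container_of() in the handler to recover the enclosing request, and INIT_WORK() plus queue_work() to defer it to cc->io_queue or cc->crypt_queue. Below is a minimal, self-contained sketch of that pattern only; the module name pattern_demo, the example_io structure and its fields are hypothetical illustrations, not dm-crypt code.

/*
 * Minimal sketch of the embed-work_struct + container_of + workqueue
 * pattern used above. Names (pattern_demo, example_io, demo_wq) are
 * illustrative only, not taken from dm-crypt.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_io {
	int id;				/* stand-in for per-request state */
	struct work_struct work;	/* embedded, like dm_crypt_io::work */
};

static struct workqueue_struct *demo_wq;

/* Work handler: recover the containing example_io from the work_struct. */
static void example_work_fn(struct work_struct *work)
{
	struct example_io *io = container_of(work, struct example_io, work);

	pr_info("pattern_demo: handling io %d\n", io->id);
	kfree(io);
}

/* Queue helper, analogous to kcryptd_queue_io()/kcryptd_queue_crypt(). */
static void example_queue_io(struct example_io *io)
{
	INIT_WORK(&io->work, example_work_fn);
	queue_work(demo_wq, &io->work);
}

static int __init pattern_demo_init(void)
{
	struct example_io *io;

	demo_wq = create_singlethread_workqueue("pattern_demo");
	if (!demo_wq)
		return -ENOMEM;

	io = kzalloc(sizeof(*io), GFP_KERNEL);
	if (!io) {
		destroy_workqueue(demo_wq);
		return -ENOMEM;
	}
	io->id = 1;
	example_queue_io(io);
	return 0;
}

static void __exit pattern_demo_exit(void)
{
	destroy_workqueue(demo_wq);	/* drains any pending work first */
}

module_init(pattern_demo_init);
module_exit(pattern_demo_exit);
MODULE_LICENSE("GPL");

Keeping separate io and crypt queues, as dm-crypt does, matches the comment retained in the second hunk: completed requests are processed on their own queue so they are not starved by new requests that may block on memory allocation in the earlier stages.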