author		Milan Broz <mbroz@redhat.com>	2006-10-03 04:15:39 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-10-03 11:04:16 -0400
commit		23541d2d288cdb54f417ba1001dacc7f3ea10a97 (patch)
tree		c338a829548fd1baa730acf6e466f8bfd4bef474 /drivers
parent		93e605c237a61f5a0ea37b12353392f01d596628 (diff)
[PATCH] dm crypt: move io to workqueue
This patch is designed to help dm-crypt comply with the new constraints
imposed by the following patch in -mm:
md-dm-reduce-stack-usage-with-stacked-block-devices.patch

Under low memory, the existing implementation relies on I/O submitted
recursively to generic_make_request() completing before the original
generic_make_request() call can return.

This patch moves the I/O submission to a workqueue so that the original
generic_make_request() can return immediately.

Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
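For context, here is a minimal sketch of the deferral pattern the patch adopts, assuming the 2.6.18-era workqueue API (INIT_WORK() takes a function plus a void * context, as the kcryptd_do_work(void *data) prototype in the diff below implies). The body of kcryptd_queue_io() is not part of this diff, so the version shown here is illustrative rather than the exact driver source:

static struct workqueue_struct *_kcryptd_workqueue;

/*
 * Runs in process context: it may sleep, allocate with GFP_NOIO and
 * call generic_make_request() without deepening the caller's stack.
 */
static void kcryptd_do_work(void *data)
{
	struct crypt_io *io = data;

	if (io->post_process)
		process_read_endio(io);		/* decrypt a completed read */
	else if (bio_data_dir(io->base_bio) == READ)
		process_read(io);		/* clone and submit the read */
	else
		process_write(io);		/* encrypt and submit writes */
}

static void kcryptd_queue_io(struct crypt_io *io)
{
	/* defer the bio submission; the caller returns without blocking */
	INIT_WORK(&io->work, kcryptd_do_work, io);
	queue_work(_kcryptd_workqueue, &io->work);
}

With this in place, crypt_map() merely initializes the crypt_io and calls kcryptd_queue_io(), so the original generic_make_request() invocation returns immediately instead of waiting for recursively submitted I/O to complete.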
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/md/dm-crypt.c	63
1 file changed, 27 insertions(+), 36 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 946a9ebc89db..c34433a6edd0 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -35,6 +35,7 @@ struct crypt_io {
 	struct work_struct work;
 	atomic_t pending;
 	int error;
+	int post_process;
 };
 
 /*
@@ -445,8 +446,7 @@ static void dec_pending(struct crypt_io *io, int error)
  * kcryptd:
  *
  * Needed because it would be very unwise to do decryption in an
- * interrupt context, so bios returning from read requests get
- * queued here.
+ * interrupt context.
  */
 static struct workqueue_struct *_kcryptd_workqueue;
 static void kcryptd_do_work(void *data);
@@ -470,12 +470,10 @@ static int crypt_endio(struct bio *clone, unsigned int done, int error)
 	if (!read_io)
 		crypt_free_buffer_pages(cc, clone, done);
 
+	/* keep going - not finished yet */
 	if (unlikely(clone->bi_size))
 		return 1;
 
-	/*
-	 * successful reads are decrypted by the worker thread
-	 */
 	if (!read_io)
 		goto out;
 
@@ -485,6 +483,7 @@ static int crypt_endio(struct bio *clone, unsigned int done, int error)
 	}
 
 	bio_put(clone);
+	io->post_process = 1;
 	kcryptd_queue_io(io);
 	return 0;
 
@@ -504,7 +503,7 @@ static void clone_init(struct crypt_io *io, struct bio *clone)
 	clone->bi_rw = io->base_bio->bi_rw;
 }
 
-static int process_read(struct crypt_io *io)
+static void process_read(struct crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *base_bio = io->base_bio;
@@ -521,7 +520,7 @@ static int process_read(struct crypt_io *io)
 	clone = bio_alloc(GFP_NOIO, bio_segments(base_bio));
 	if (unlikely(!clone)) {
 		dec_pending(io, -ENOMEM);
-		return 0;
+		return;
 	}
 
 	clone_init(io, clone);
@@ -533,11 +532,9 @@ static int process_read(struct crypt_io *io)
 			sizeof(struct bio_vec) * clone->bi_vcnt);
 
 	generic_make_request(clone);
-
-	return 0;
 }
 
-static int process_write(struct crypt_io *io)
+static void process_write(struct crypt_io *io)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *base_bio = io->base_bio;
@@ -558,15 +555,18 @@ static int process_write(struct crypt_io *io)
 	while (remaining) {
 		clone = crypt_alloc_buffer(cc, base_bio->bi_size,
 					   io->first_clone, &bvec_idx);
-		if (unlikely(!clone))
-			goto cleanup;
+		if (unlikely(!clone)) {
+			dec_pending(io, -ENOMEM);
+			return;
+		}
 
 		ctx.bio_out = clone;
 
 		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
 			crypt_free_buffer_pages(cc, clone, clone->bi_size);
 			bio_put(clone);
-			goto cleanup;
+			dec_pending(io, -EIO);
+			return;
 		}
 
 		clone_init(io, clone);
@@ -582,31 +582,20 @@ static int process_write(struct crypt_io *io)
 			io->first_clone = clone;
 		}
 
-		atomic_inc(&io->pending);
-
 		remaining -= clone->bi_size;
 		sector += bio_sectors(clone);
 
+		/* prevent bio_put of first_clone */
+		if (remaining)
+			atomic_inc(&io->pending);
+
 		generic_make_request(clone);
 
 		/* out of memory -> run queues */
 		if (remaining)
 			blk_congestion_wait(bio_data_dir(clone), HZ/100);
-	}
 
-	/* drop reference, clones could have returned before we reach this */
-	dec_pending(io, 0);
-	return 0;
-
-cleanup:
-	if (io->first_clone) {
-		dec_pending(io, -ENOMEM);
-		return 0;
 	}
-
-	/* if no bio has been dispatched yet, we can directly return the error */
-	mempool_free(io, cc->io_pool);
-	return -ENOMEM;
 }
 
 static void process_read_endio(struct crypt_io *io)
@@ -624,7 +613,12 @@ static void kcryptd_do_work(void *data)
 {
 	struct crypt_io *io = data;
 
-	process_read_endio(io);
+	if (io->post_process)
+		process_read_endio(io);
+	else if (bio_data_dir(io->base_bio) == READ)
+		process_read(io);
+	else
+		process_write(io);
 }
 
 /*
@@ -889,17 +883,14 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 	struct crypt_io *io;
 
 	io = mempool_alloc(cc->io_pool, GFP_NOIO);
-
 	io->target = ti;
 	io->base_bio = bio;
 	io->first_clone = NULL;
-	io->error = 0;
+	io->error = io->post_process = 0;
 	atomic_set(&io->pending, 0);
+	kcryptd_queue_io(io);
 
-	if (bio_data_dir(bio) == WRITE)
-		return process_write(io);
-
-	return process_read(io);
+	return 0;
 }
 
 static int crypt_status(struct dm_target *ti, status_type_t type,
@@ -999,7 +990,7 @@ error:
 
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version= {1, 2, 0},
+	.version= {1, 3, 0},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,