 drivers/md/dm-crypt.c | 279 ++++++++++++++++++++++++++---------------------
 1 file changed, 156 insertions(+), 123 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index e1e8040f451a..3783cf978850 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -30,7 +30,7 @@
  */
 struct crypt_io {
 	struct dm_target *target;
-	struct bio *bio;
+	struct bio *base_bio;
 	struct bio *first_clone;
 	struct work_struct work;
 	atomic_t pending;
@@ -319,7 +319,7 @@ static struct bio *
 crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
 		   struct bio *base_bio, unsigned int *bio_vec_idx)
 {
-	struct bio *bio;
+	struct bio *clone;
 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
 	unsigned int i;
@@ -330,23 +330,23 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
 	 * FIXME: Is this really intelligent?
 	 */
 	if (base_bio)
-		bio = bio_clone(base_bio, GFP_NOIO|__GFP_NOMEMALLOC);
+		clone = bio_clone(base_bio, GFP_NOIO|__GFP_NOMEMALLOC);
 	else
-		bio = bio_alloc(GFP_NOIO|__GFP_NOMEMALLOC, nr_iovecs);
-	if (!bio)
+		clone = bio_alloc(GFP_NOIO|__GFP_NOMEMALLOC, nr_iovecs);
+	if (!clone)
 		return NULL;
 
 	/* if the last bio was not complete, continue where that one ended */
-	bio->bi_idx = *bio_vec_idx;
-	bio->bi_vcnt = *bio_vec_idx;
-	bio->bi_size = 0;
-	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+	clone->bi_idx = *bio_vec_idx;
+	clone->bi_vcnt = *bio_vec_idx;
+	clone->bi_size = 0;
+	clone->bi_flags &= ~(1 << BIO_SEG_VALID);
 
-	/* bio->bi_idx pages have already been allocated */
-	size -= bio->bi_idx * PAGE_SIZE;
+	/* clone->bi_idx pages have already been allocated */
+	size -= clone->bi_idx * PAGE_SIZE;
 
-	for(i = bio->bi_idx; i < nr_iovecs; i++) {
-		struct bio_vec *bv = bio_iovec_idx(bio, i);
+	for (i = clone->bi_idx; i < nr_iovecs; i++) {
+		struct bio_vec *bv = bio_iovec_idx(clone, i);
 
 		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
 		if (!bv->bv_page)
@@ -357,7 +357,7 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
 		 * return a partially allocated bio, the caller will then try
 		 * to allocate additional bios while submitting this partial bio
 		 */
-		if ((i - bio->bi_idx) == (MIN_BIO_PAGES - 1))
+		if ((i - clone->bi_idx) == (MIN_BIO_PAGES - 1))
 			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
 
 		bv->bv_offset = 0;
@@ -366,13 +366,13 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
 		else
 			bv->bv_len = size;
 
-		bio->bi_size += bv->bv_len;
-		bio->bi_vcnt++;
+		clone->bi_size += bv->bv_len;
+		clone->bi_vcnt++;
 		size -= bv->bv_len;
 	}
 
-	if (!bio->bi_size) {
-		bio_put(bio);
+	if (!clone->bi_size) {
+		bio_put(clone);
 		return NULL;
 	}
 
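
The crypt_alloc_buffer() hunks above are a mechanical rename (bio -> clone), but the code they touch implements the allocator's partial-buffer strategy: the clone resumes from *bio_vec_idx, the first MIN_BIO_PAGES pages are allocated with a mask that may block, and __GFP_WAIT is then dropped so a failed mempool_alloc() merely truncates the clone, which the caller submits before retrying. A minimal sketch of that gfp downgrade, assuming illustrative names (MIN_PAGES, pool, add_page()) rather than the driver's own:

	gfp_t gfp = GFP_NOIO | __GFP_HIGHMEM;
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = mempool_alloc(pool, gfp);

		if (!page)
			break;	/* partial buffer: submit what we have */

		/* the first MIN_PAGES allocations may block, the rest must not */
		if (i == MIN_PAGES - 1)
			gfp = (gfp | __GFP_NOWARN) & ~__GFP_WAIT;

		add_page(clone, page);	/* hypothetical helper */
	}
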
@@ -380,13 +380,13 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
 	 * Remember the last bio_vec allocated to be able
 	 * to correctly continue after the splitting.
 	 */
-	*bio_vec_idx = bio->bi_vcnt;
+	*bio_vec_idx = clone->bi_vcnt;
 
-	return bio;
+	return clone;
 }
 
 static void crypt_free_buffer_pages(struct crypt_config *cc,
-				    struct bio *bio, unsigned int bytes)
+				    struct bio *clone, unsigned int bytes)
 {
 	unsigned int i, start, end;
 	struct bio_vec *bv;
@@ -400,19 +400,19 @@ static void crypt_free_buffer_pages(struct crypt_config *cc,
 	 * A fix to the bi_idx issue in the kernel is in the works, so
 	 * we will hopefully be able to revert to the cleaner solution soon.
 	 */
-	i = bio->bi_vcnt - 1;
-	bv = bio_iovec_idx(bio, i);
-	end = (i << PAGE_SHIFT) + (bv->bv_offset + bv->bv_len) - bio->bi_size;
+	i = clone->bi_vcnt - 1;
+	bv = bio_iovec_idx(clone, i);
+	end = (i << PAGE_SHIFT) + (bv->bv_offset + bv->bv_len) - clone->bi_size;
 	start = end - bytes;
 
 	start >>= PAGE_SHIFT;
-	if (!bio->bi_size)
-		end = bio->bi_vcnt;
+	if (!clone->bi_size)
+		end = clone->bi_vcnt;
 	else
 		end >>= PAGE_SHIFT;
 
-	for(i = start; i < end; i++) {
-		bv = bio_iovec_idx(bio, i);
+	for (i = start; i < end; i++) {
+		bv = bio_iovec_idx(clone, i);
 		BUG_ON(!bv->bv_page);
 		mempool_free(bv->bv_page, cc->page_pool);
 		bv->bv_page = NULL;
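
The index arithmetic above recovers which pages of the clone a (possibly partial) write completion covers: end is the byte offset just past the completed data (the buffer length minus the still-outstanding bi_size, which bio_endio() has already decremented), and start backs up by the bytes finished in this call. A worked example, assuming PAGE_SHIFT = 12 and four one-page bvecs with bv_offset = 0, when this completion finished bytes = 8192 with bi_size = 4096 still pending:

	i     = bi_vcnt - 1             = 3
	end   = (3 << 12) + 4096 - 4096 = 12288   (end of completed data)
	start = 12288 - 8192            = 4096
	start >>= 12  ->  1,  end >>= 12  ->  3

so pages 1 and 2 are freed here; page 0 was freed by an earlier partial completion, and the last page goes once bi_size reaches 0, when end is forced up to bi_vcnt.
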
@@ -436,7 +436,7 @@ static void dec_pending(struct crypt_io *io, int error)
 	if (io->first_clone)
 		bio_put(io->first_clone);
 
-	bio_endio(io->bio, io->bio->bi_size, io->error);
+	bio_endio(io->base_bio, io->base_bio->bi_size, io->error);
 
 	mempool_free(io, cc->io_pool);
 }
@@ -449,25 +449,133 @@ static void dec_pending(struct crypt_io *io, int error)
  * queued here.
  */
 static struct workqueue_struct *_kcryptd_workqueue;
+static void kcryptd_do_work(void *data);
 
-static void kcryptd_do_work(void *data)
+static void kcryptd_queue_io(struct crypt_io *io)
 {
-	struct crypt_io *io = (struct crypt_io *) data;
-	struct crypt_config *cc = (struct crypt_config *) io->target->private;
+	INIT_WORK(&io->work, kcryptd_do_work, io);
+	queue_work(_kcryptd_workqueue, &io->work);
+}
+
+static int crypt_endio(struct bio *clone, unsigned int done, int error)
+{
+	struct crypt_io *io = clone->bi_private;
+	struct crypt_config *cc = io->target->private;
+	unsigned read_io = bio_data_dir(clone) == READ;
+
+	/*
+	 * free the processed pages, even if
+	 * it's only a partially completed write
+	 */
+	if (!read_io)
+		crypt_free_buffer_pages(cc, clone, done);
+
+	if (unlikely(clone->bi_size))
+		return 1;
+
+	/*
+	 * successful reads are decrypted by the worker thread
+	 */
+	if (!read_io)
+		goto out;
+
+	if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
+		error = -EIO;
+		goto out;
+	}
+
+	bio_put(clone);
+	kcryptd_queue_io(io);
+	return 0;
+
+out:
+	bio_put(clone);
+	dec_pending(io, error);
+	return error;
+}
+
+static void clone_init(struct crypt_io *io, struct bio *clone)
+{
+	struct crypt_config *cc = io->target->private;
+
+	clone->bi_private = io;
+	clone->bi_end_io = crypt_endio;
+	clone->bi_bdev = cc->dev->bdev;
+	clone->bi_rw = io->base_bio->bi_rw;
+}
+
+static struct bio *clone_read(struct crypt_io *io,
+			      sector_t sector)
+{
+	struct crypt_config *cc = io->target->private;
+	struct bio *base_bio = io->base_bio;
+	struct bio *clone;
+
+	/*
+	 * The block layer might modify the bvec array, so always
+	 * copy the required bvecs because we need the original
+	 * one in order to decrypt the whole bio data *afterwards*.
+	 */
+	clone = bio_alloc(GFP_NOIO, bio_segments(base_bio));
+	if (unlikely(!clone))
+		return NULL;
+
+	clone_init(io, clone);
+	clone->bi_idx = 0;
+	clone->bi_vcnt = bio_segments(base_bio);
+	clone->bi_size = base_bio->bi_size;
+	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
+	       sizeof(struct bio_vec) * clone->bi_vcnt);
+	clone->bi_sector = cc->start + sector;
+
+	return clone;
+}
+
+static struct bio *clone_write(struct crypt_io *io,
+			       sector_t sector,
+			       unsigned *bvec_idx,
+			       struct convert_context *ctx)
+{
+	struct crypt_config *cc = io->target->private;
+	struct bio *base_bio = io->base_bio;
+	struct bio *clone;
+
+	clone = crypt_alloc_buffer(cc, base_bio->bi_size,
+				   io->first_clone, bvec_idx);
+	if (!clone)
+		return NULL;
+
+	ctx->bio_out = clone;
+
+	if (unlikely(crypt_convert(cc, ctx) < 0)) {
+		crypt_free_buffer_pages(cc, clone,
+					clone->bi_size);
+		bio_put(clone);
+		return NULL;
+	}
+
+	clone_init(io, clone);
+	clone->bi_sector = cc->start + sector;
+
+	return clone;
+}
+
+static void process_read_endio(struct crypt_io *io)
+{
+	struct crypt_config *cc = io->target->private;
 	struct convert_context ctx;
-	int r;
 
-	crypt_convert_init(cc, &ctx, io->bio, io->bio,
-			   io->bio->bi_sector - io->target->begin, 0);
-	r = crypt_convert(cc, &ctx);
+	crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio,
+			   io->base_bio->bi_sector - io->target->begin, 0);
 
-	dec_pending(io, r);
+	dec_pending(io, crypt_convert(cc, &ctx));
 }
 
-static void kcryptd_queue_io(struct crypt_io *io)
+static void kcryptd_do_work(void *data)
 {
-	INIT_WORK(&io->work, kcryptd_do_work, io);
-	queue_work(_kcryptd_workqueue, &io->work);
+	struct crypt_io *io = data;
+
+	process_read_endio(io);
 }
 
 /*
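
The rewritten completion path keeps everything heavyweight out of crypt_endio(), which runs in interrupt context: writes only have their clone pages freed there, while a successfully read clone is dropped with bio_put() and the io is queued to kcryptd, whose worker decrypts base_bio in place (process_read_endio()) and finishes it through dec_pending(). The forward declaration of kcryptd_do_work() simply lets kcryptd_queue_io() be defined first; note the old three-argument INIT_WORK() API. The code also relies on the 2.6-era bi_end_io contract, roughly (an illustrative handler, not dm-crypt's):

	static int example_endio(struct bio *bio, unsigned int done, int error)
	{
		/* bi_size was already decremented by `done` in bio_endio() */
		if (bio->bi_size)
			return 1;	/* partial completion, more to come */

		/* the whole bio has completed: release it */
		bio_put(bio);
		return 0;
	}
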
@@ -481,7 +589,7 @@ static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
 
 	buffer[2] = '\0';
 
-	for(i = 0; i < size; i++) {
+	for (i = 0; i < size; i++) {
 		buffer[0] = *hex++;
 		buffer[1] = *hex++;
 
@@ -504,7 +612,7 @@ static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
 {
 	unsigned int i;
 
-	for(i = 0; i < size; i++) {
+	for (i = 0; i < size; i++) {
 		sprintf(hex, "%02x", *key);
 		hex += 2;
 		key++;
@@ -725,88 +833,10 @@ static void crypt_dtr(struct dm_target *ti)
 	kfree(cc);
 }
 
-static int crypt_endio(struct bio *bio, unsigned int done, int error)
-{
-	struct crypt_io *io = (struct crypt_io *) bio->bi_private;
-	struct crypt_config *cc = (struct crypt_config *) io->target->private;
-
-	if (bio_data_dir(bio) == WRITE) {
-		/*
-		 * free the processed pages, even if
-		 * it's only a partially completed write
-		 */
-		crypt_free_buffer_pages(cc, bio, done);
-	}
-
-	if (bio->bi_size)
-		return 1;
-
-	bio_put(bio);
-
-	/*
-	 * successful reads are decrypted by the worker thread
-	 */
-	if ((bio_data_dir(bio) == READ)
-	    && bio_flagged(bio, BIO_UPTODATE)) {
-		kcryptd_queue_io(io);
-		return 0;
-	}
-
-	dec_pending(io, error);
-	return error;
-}
-
-static inline struct bio *
-crypt_clone(struct crypt_config *cc, struct crypt_io *io, struct bio *bio,
-	    sector_t sector, unsigned int *bvec_idx,
-	    struct convert_context *ctx)
-{
-	struct bio *clone;
-
-	if (bio_data_dir(bio) == WRITE) {
-		clone = crypt_alloc_buffer(cc, bio->bi_size,
-					   io->first_clone, bvec_idx);
-		if (clone) {
-			ctx->bio_out = clone;
-			if (crypt_convert(cc, ctx) < 0) {
-				crypt_free_buffer_pages(cc, clone,
-							clone->bi_size);
-				bio_put(clone);
-				return NULL;
-			}
-		}
-	} else {
-		/*
-		 * The block layer might modify the bvec array, so always
-		 * copy the required bvecs because we need the original
-		 * one in order to decrypt the whole bio data *afterwards*.
-		 */
-		clone = bio_alloc(GFP_NOIO, bio_segments(bio));
-		if (clone) {
-			clone->bi_idx = 0;
-			clone->bi_vcnt = bio_segments(bio);
-			clone->bi_size = bio->bi_size;
-			memcpy(clone->bi_io_vec, bio_iovec(bio),
-			       sizeof(struct bio_vec) * clone->bi_vcnt);
-		}
-	}
-
-	if (!clone)
-		return NULL;
-
-	clone->bi_private = io;
-	clone->bi_end_io = crypt_endio;
-	clone->bi_bdev = cc->dev->bdev;
-	clone->bi_sector = cc->start + sector;
-	clone->bi_rw = bio->bi_rw;
-
-	return clone;
-}
-
 static int crypt_map(struct dm_target *ti, struct bio *bio,
 		     union map_info *map_context)
 {
-	struct crypt_config *cc = (struct crypt_config *) ti->private;
+	struct crypt_config *cc = ti->private;
 	struct crypt_io *io;
 	struct convert_context ctx;
 	struct bio *clone;
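
Note that crypt_endio() and crypt_clone() are not dropped here: they are re-created earlier in the file in split form, as crypt_endio()/clone_init() plus the direction-specific clone_read() and clone_write(), since the new crypt_endio() calls kcryptd_queue_io() directly and must follow it.
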
@@ -816,7 +846,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 
 	io = mempool_alloc(cc->io_pool, GFP_NOIO);
 	io->target = ti;
-	io->bio = bio;
+	io->base_bio = bio;
 	io->first_clone = NULL;
 	io->error = 0;
 	atomic_set(&io->pending, 1); /* hold a reference */
@@ -829,7 +859,10 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 	 * so repeat the whole process until all the data can be handled.
 	 */
 	while (remaining) {
-		clone = crypt_clone(cc, io, bio, sector, &bvec_idx, &ctx);
+		if (bio_data_dir(bio) == WRITE)
+			clone = clone_write(io, sector, &bvec_idx, &ctx);
+		else
+			clone = clone_read(io, sector);
 		if (!clone)
 			goto cleanup;
 
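
With clone_read() and clone_write() factored out, the splitting loop in crypt_map() reduces to a dispatch on the data direction. The two resulting data paths, as a summary comment:

	/*
	 * WRITE:  base_bio --crypt_convert()--> clone (new pages) --> device;
	 *         crypt_endio() frees the clone's pages as chunks complete.
	 *
	 * READ:   base_bio --bvec copy--> clone --> device;
	 *         crypt_endio() queues kcryptd, which decrypts base_bio in
	 *         place and completes it via dec_pending().
	 */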