Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm-crypt.c	| 193
1 file changed, 192 insertions(+), 1 deletion(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index b8b9267c4dbb..4e054bd91664 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -23,6 +23,9 @@
 #include <linux/scatterlist.h>
 #include <asm/page.h>
 #include <asm/unaligned.h>
+#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/algapi.h>
 
 #include <linux/device-mapper.h>
 
@@ -90,6 +93,12 @@ struct iv_benbi_private {
 	int shift;
 };
 
+#define LMK_SEED_SIZE 64 /* hash + 0 */
+struct iv_lmk_private {
+	struct crypto_shash *hash_tfm;
+	u8 *seed;
+};
+
 /*
  * Crypt: maps a linear range of a block device
  * and encrypts / decrypts at the same time.
@@ -133,6 +142,7 @@ struct crypt_config {
 	union {
 		struct iv_essiv_private essiv;
 		struct iv_benbi_private benbi;
+		struct iv_lmk_private lmk;
 	} iv_gen_private;
 	sector_t iv_offset;
 	unsigned int iv_size;
@@ -207,6 +217,20 @@ static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
  * null: the initial vector is always zero. Provides compatibility with
  *       obsolete loop_fish2 devices. Do not use for new devices.
  *
+ * lmk:  Compatible implementation of the block chaining mode used
+ *       by the Loop-AES block device encryption system
+ *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
+ *       It operates on full 512 byte sectors and uses CBC
+ *       with an IV derived from the sector number, the data and
+ *       optionally extra IV seed.
+ *       This means that after decryption the first block
+ *       of sector must be tweaked according to decrypted data.
+ *       Loop-AES can use three encryption schemes:
+ *         version 1: is plain aes-cbc mode
+ *         version 2: uses 64 multikey scheme with lmk IV generator
+ *         version 3: the same as version 2 with additional IV seed
+ *                    (it uses 65 keys, last key is used as IV seed)
+ *
  * plumb: unimplemented, see:
  * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
  */
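To make the comment above concrete, here is a minimal per-sector sketch (not part of the patch; lmk_compute_iv(), cbc_encrypt() and cbc_decrypt() are hypothetical stand-ins for the shash/ablkcipher calls dm-crypt actually makes): on write the IV is derived from the plaintext of blocks 1-31 plus the sector number before CBC encryption; on read the sector is first decrypted with a zero IV and block 0 is then fixed up by XORing in the recomputed IV, which is what the generator/post callbacks added further down implement.

/*
 * Illustration only, not part of the patch.  Helper names are
 * hypothetical placeholders for the real crypto API calls.
 */
static void lmk_encrypt_sector_sketch(u8 *sector, u64 sector_nr)
{
	u8 iv[16];

	/* IV = truncated MD5 state over (optional seed ||
	 * plaintext blocks 1..31 || encoded sector number). */
	lmk_compute_iv(iv, sector, sector_nr);
	cbc_encrypt(sector, 512, iv);
}

static void lmk_decrypt_sector_sketch(u8 *sector, u64 sector_nr)
{
	u8 iv[16];

	/* Decrypt with an all-zero IV first; blocks 1..31 come out
	 * correct because only block 0 depends on the IV in CBC. */
	memset(iv, 0, sizeof(iv));
	cbc_decrypt(sector, 512, iv);

	/* Recompute the real IV from the now-plaintext blocks 1..31
	 * and undo its effect on block 0 by XOR (crypt_iv_lmk_post). */
	lmk_compute_iv(iv, sector, sector_nr);
	crypto_xor(sector, iv, 16);
}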
@@ -446,6 +470,156 @@ static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
 	return 0;
 }
 
+static void crypt_iv_lmk_dtr(struct crypt_config *cc)
+{
+	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+
+	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
+		crypto_free_shash(lmk->hash_tfm);
+	lmk->hash_tfm = NULL;
+
+	kzfree(lmk->seed);
+	lmk->seed = NULL;
+}
+
+static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
+			    const char *opts)
+{
+	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+
+	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
+	if (IS_ERR(lmk->hash_tfm)) {
+		ti->error = "Error initializing LMK hash";
+		return PTR_ERR(lmk->hash_tfm);
+	}
+
+	/* No seed in LMK version 2 */
+	if (cc->key_parts == cc->tfms_count) {
+		lmk->seed = NULL;
+		return 0;
+	}
+
+	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
+	if (!lmk->seed) {
+		crypt_iv_lmk_dtr(cc);
+		ti->error = "Error kmallocing seed storage in LMK";
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int crypt_iv_lmk_init(struct crypt_config *cc)
+{
+	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+	int subkey_size = cc->key_size / cc->key_parts;
+
+	/* LMK seed is on the position of LMK_KEYS + 1 key */
+	if (lmk->seed)
+		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
+		       crypto_shash_digestsize(lmk->hash_tfm));
+
+	return 0;
+}
+
+static int crypt_iv_lmk_wipe(struct crypt_config *cc)
+{
+	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+
+	if (lmk->seed)
+		memset(lmk->seed, 0, LMK_SEED_SIZE);
+
+	return 0;
+}
+
+static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
+			    struct dm_crypt_request *dmreq,
+			    u8 *data)
+{
+	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+	struct {
+		struct shash_desc desc;
+		char ctx[crypto_shash_descsize(lmk->hash_tfm)];
+	} sdesc;
+	struct md5_state md5state;
+	u32 buf[4];
+	int i, r;
+
+	sdesc.desc.tfm = lmk->hash_tfm;
+	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	r = crypto_shash_init(&sdesc.desc);
+	if (r)
+		return r;
+
+	if (lmk->seed) {
+		r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
+		if (r)
+			return r;
+	}
+
+	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
+	r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
+	if (r)
+		return r;
+
+	/* Sector is cropped to 56 bits here */
+	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
+	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
+	buf[2] = cpu_to_le32(4024);
+	buf[3] = 0;
+	r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
+	if (r)
+		return r;
+
+	/* No MD5 padding here */
+	r = crypto_shash_export(&sdesc.desc, &md5state);
+	if (r)
+		return r;
+
+	for (i = 0; i < MD5_HASH_WORDS; i++)
+		__cpu_to_le32s(&md5state.hash[i]);
+	memcpy(iv, &md5state.hash, cc->iv_size);
+
+	return 0;
+}
+
+static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
+			    struct dm_crypt_request *dmreq)
+{
+	u8 *src;
+	int r = 0;
+
+	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
+		src = kmap_atomic(sg_page(&dmreq->sg_in), KM_USER0);
+		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
+		kunmap_atomic(src, KM_USER0);
+	} else
+		memset(iv, 0, cc->iv_size);
+
+	return r;
+}
+
+static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
+			     struct dm_crypt_request *dmreq)
+{
+	u8 *dst;
+	int r;
+
+	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
+		return 0;
+
+	dst = kmap_atomic(sg_page(&dmreq->sg_out), KM_USER0);
+	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);
+
+	/* Tweak the first block of plaintext sector */
+	if (!r)
+		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);
+
+	kunmap_atomic(dst, KM_USER0);
+	return r;
+}
+
 static struct crypt_iv_operations crypt_iv_plain_ops = {
 	.generator = crypt_iv_plain_gen
 };
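The "cropped to 56 bits" packing in crypt_iv_lmk_one() can be checked in isolation. Below is a small stand-alone sketch (assumptions: plain user-space C with stdint types instead of u32/u64, and the byte-order conversion done by cpu_to_le32() in the kernel is left out; the constant 4024 is simply carried over from the patch):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the buf[] packing in crypt_iv_lmk_one(): the low 32 bits of
 * the sector number, then the next 24 bits with the top bit set as a
 * marker, then the fixed constant 4024 and a zero word. */
static void lmk_pack_sector(uint64_t sector, uint32_t buf[4])
{
	buf[0] = (uint32_t)(sector & 0xFFFFFFFF);
	buf[1] = (uint32_t)((sector >> 32) & 0x00FFFFFF) | 0x80000000;
	buf[2] = 4024;
	buf[3] = 0;
}

int main(void)
{
	uint32_t buf[4];

	lmk_pack_sector(0x0123456789ABCDEFULL, buf);
	/* Prints: 89abcdef 80234567 00000fb8 00000000 */
	printf("%08x %08x %08x %08x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}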
@@ -472,6 +646,15 @@ static struct crypt_iv_operations crypt_iv_null_ops = {
 	.generator = crypt_iv_null_gen
 };
 
+static struct crypt_iv_operations crypt_iv_lmk_ops = {
+	.ctr	   = crypt_iv_lmk_ctr,
+	.dtr	   = crypt_iv_lmk_dtr,
+	.init	   = crypt_iv_lmk_init,
+	.wipe	   = crypt_iv_lmk_wipe,
+	.generator = crypt_iv_lmk_gen,
+	.post	   = crypt_iv_lmk_post
+};
+
 static void crypt_convert_init(struct crypt_config *cc,
 			       struct convert_context *ctx,
 			       struct bio *bio_out, struct bio *bio_in,
@@ -1341,7 +1524,15 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 		cc->iv_gen_ops = &crypt_iv_benbi_ops;
 	else if (strcmp(ivmode, "null") == 0)
 		cc->iv_gen_ops = &crypt_iv_null_ops;
-	else {
+	else if (strcmp(ivmode, "lmk") == 0) {
+		cc->iv_gen_ops = &crypt_iv_lmk_ops;
+		/* Version 2 and 3 is recognised according
+		 * to length of provided multi-key string.
+		 * If present (version 3), last key is used as IV seed.
+		 */
+		if (cc->key_size % cc->key_parts)
+			cc->key_parts++;
+	} else {
 		ret = -EINVAL;
 		ti->error = "Invalid IV mode";
 		goto bad;
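The version detection in the hunk above can be illustrated with concrete numbers. A stand-alone sketch follows (assumptions: 64 AES-256 subkeys as used by Loop-AES multikey mode, i.e. a cipher spec along the lines of aes:64-cbc-lmk; the table syntax itself is outside the scope of this patch):

#include <assert.h>

/* Illustrative only: mirrors the key_parts adjustment in
 * crypt_ctr_cipher() for a hypothetical Loop-AES v3 key string
 * made of 65 concatenated 256-bit keys. */
int main(void)
{
	unsigned int key_size = 65 * 32;	/* 64 subkeys + 1 seed key */
	unsigned int tfms_count = 64;		/* number of cipher tfms   */
	unsigned int key_parts = tfms_count;

	if (key_size % key_parts)		/* 2080 % 64 != 0 -> version 3 */
		key_parts++;

	assert(key_parts == 65);
	/* crypt_iv_lmk_init() then copies the seed from offset
	 * tfms_count * (key_size / key_parts) = 64 * 32 = 2048,
	 * taking only the MD5 digest size (16 bytes) of it. */
	return 0;
}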