author	Milan Broz <mbroz@redhat.com>	2011-01-13 14:59:54 -0500
committer	Alasdair G Kergon <agk@redhat.com>	2011-01-13 14:59:54 -0500
commit	2dc5327d3acb3340ab6fa3981401b076b78a51f4 (patch)
tree	32297319cd947ea8ab867ec97042983382959db8
parent	20c82538e4f5ede51bc2b4795bc6e5cae772796d (diff)
dm crypt: add post iv call to iv generator
The IV (initialisation vector) can in principle depend not only on the sector but also on plaintext data (or other attributes).

Change the IV generator interface to work directly with the dmreq structure so that such dependencies can be expressed in the generator.

Also add a post() function which is called after the crypto operation. This allows tricky modification of decrypted data or of IV internals.

In asynchronous mode, post() can be called after the ctx->sector count has already been increased, so a copy of the sector (iv_sector) must be kept directly in the dmreq structure. (N.B. dmreq always covers only one sector in its scatterlists.)

Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
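To illustrate what the new interface permits, here is a minimal sketch (not part of this patch; the crypt_iv_example_* names are hypothetical) of an IV generator that reads the sector from dmreq and uses the new post() hook to inspect or fix up the single processed sector after the crypto operation completes:

static int crypt_iv_example_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	/* dmreq->iv_sector stays valid even if ctx->sector has advanced */
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

static int crypt_iv_example_post(struct crypt_config *cc, u8 *iv,
				 struct dm_crypt_request *dmreq)
{
	/*
	 * Called after crypto_ablkcipher_encrypt()/decrypt(); the single
	 * sector of output is reachable through dmreq->sg_out, so data or
	 * IV internals could be adjusted here before the bio completes.
	 */
	return 0;
}

static struct crypt_iv_operations crypt_iv_example_ops = {
	.generator = crypt_iv_example_gen,
	.post      = crypt_iv_example_post,
};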
-rw-r--r--	drivers/md/dm-crypt.c	48
1 file changed, 35 insertions, 13 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index dc4403bcc6a0..e0ebe685be6a 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -64,6 +64,7 @@ struct dm_crypt_request {
 	struct convert_context *ctx;
 	struct scatterlist sg_in;
 	struct scatterlist sg_out;
+	sector_t iv_sector;
 };
 
 struct crypt_config;
@@ -74,7 +75,10 @@ struct crypt_iv_operations {
 	void (*dtr)(struct crypt_config *cc);
 	int (*init)(struct crypt_config *cc);
 	int (*wipe)(struct crypt_config *cc);
-	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
+	int (*generator)(struct crypt_config *cc, u8 *iv,
+			 struct dm_crypt_request *dmreq);
+	int (*post)(struct crypt_config *cc, u8 *iv,
+		    struct dm_crypt_request *dmreq);
 };
 
 struct iv_essiv_private {
@@ -168,6 +172,7 @@ static struct kmem_cache *_crypt_io_pool;
 
 static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
+static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
 
 static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
 {
@@ -205,19 +210,20 @@ static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
  * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
  */
 
-static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
+			      struct dm_crypt_request *dmreq)
 {
 	memset(iv, 0, cc->iv_size);
-	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);
+	*(u32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
 
 	return 0;
 }
 
 static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
-				sector_t sector)
+				struct dm_crypt_request *dmreq)
 {
 	memset(iv, 0, cc->iv_size);
-	*(u64 *)iv = cpu_to_le64(sector);
+	*(u64 *)iv = cpu_to_le64(dmreq->iv_sector);
 
 	return 0;
 }
@@ -378,12 +384,13 @@ bad:
 	return err;
 }
 
-static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
+			      struct dm_crypt_request *dmreq)
 {
 	struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;
 
 	memset(iv, 0, cc->iv_size);
-	*(u64 *)iv = cpu_to_le64(sector);
+	*(u64 *)iv = cpu_to_le64(dmreq->iv_sector);
 	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
 
 	return 0;
@@ -417,19 +424,21 @@ static void crypt_iv_benbi_dtr(struct crypt_config *cc)
 {
 }
 
-static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
+			      struct dm_crypt_request *dmreq)
 {
 	__be64 val;
 
 	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
 
-	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1);
+	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
 	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
 
 	return 0;
 }
 
-static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
+			     struct dm_crypt_request *dmreq)
 {
 	memset(iv, 0, cc->iv_size);
 
@@ -489,6 +498,13 @@ static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
 	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
 }
 
+static u8 *iv_of_dmreq(struct crypt_config *cc,
+		       struct dm_crypt_request *dmreq)
+{
+	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
+		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
+}
+
 static int crypt_convert_block(struct crypt_config *cc,
 			       struct convert_context *ctx,
 			       struct ablkcipher_request *req)
@@ -500,9 +516,9 @@ static int crypt_convert_block(struct crypt_config *cc,
 	int r = 0;
 
 	dmreq = dmreq_of_req(cc, req);
-	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
-			 crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
+	iv = iv_of_dmreq(cc, dmreq);
 
+	dmreq->iv_sector = ctx->sector;
 	dmreq->ctx = ctx;
 	sg_init_table(&dmreq->sg_in, 1);
 	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
@@ -525,7 +541,7 @@ static int crypt_convert_block(struct crypt_config *cc,
 	}
 
 	if (cc->iv_gen_ops) {
-		r = cc->iv_gen_ops->generator(cc, iv, ctx->sector);
+		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
 		if (r < 0)
 			return r;
 	}
@@ -538,6 +554,9 @@ static int crypt_convert_block(struct crypt_config *cc,
 	else
 		r = crypto_ablkcipher_decrypt(req);
 
+	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
+		r = cc->iv_gen_ops->post(cc, iv, dmreq);
+
 	return r;
 }
 
@@ -1005,6 +1024,9 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 		return;
 	}
 
+	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
+		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
+
 	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
 
 	if (!atomic_dec_and_test(&ctx->pending))