author     Alasdair G Kergon <agk@redhat.com>     2006-06-26 03:27:35 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-06-26 12:58:36 -0400
commit     72d9486169a2a8353e022813185ba2f32d7dde69
tree       2fe6c382feb3f21d829abf543c54be486007557c  /drivers/md/dm-crypt.c
parent     5c6bd75d06db512515a3781aa97e42df2faf0815
[PATCH] dm: improve error message consistency
Tidy device-mapper error messages to include context information
automatically.
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
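
Background on the mechanism (not part of the diff below; the exact macro bodies in dm.h may differ slightly from this sketch): each target defines its own DM_MSG_PREFIX (here "crypt"), and the DMERR/DMWARN/DMINFO helpers in dm.h paste that prefix into every message at the point of use, so strings such as ti->error no longer need to hard-code "crypt: " -- the core supplies the context when it reports them. Roughly:

    /* Sketch of the dm.h logging helpers this patch relies on;
     * the exact definitions in the kernel headers may differ. */
    #define DM_NAME "device-mapper"

    #define DMERR(f, arg...) \
            printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
    #define DMWARN(f, arg...) \
            printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)

    /* In dm-crypt.c, after this patch: */
    #define DM_MSG_PREFIX "crypt"

    /* DMWARN("Unexpected additional cipher options");
     * now logs: device-mapper: crypt: Unexpected additional cipher options */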
Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--  drivers/md/dm-crypt.c  56
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 61a590bb6241..6022ed12a795 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -20,7 +20,7 @@
 
 #include "dm.h"
 
-#define PFX "crypt: "
+#define DM_MSG_PREFIX "crypt"
 
 /*
  * per bio private data
@@ -125,19 +125,19 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 	u8 *salt;
 
 	if (opts == NULL) {
-		ti->error = PFX "Digest algorithm missing for ESSIV mode";
+		ti->error = "Digest algorithm missing for ESSIV mode";
 		return -EINVAL;
 	}
 
 	/* Hash the cipher key with the given hash algorithm */
 	hash_tfm = crypto_alloc_tfm(opts, CRYPTO_TFM_REQ_MAY_SLEEP);
 	if (hash_tfm == NULL) {
-		ti->error = PFX "Error initializing ESSIV hash";
+		ti->error = "Error initializing ESSIV hash";
 		return -EINVAL;
 	}
 
 	if (crypto_tfm_alg_type(hash_tfm) != CRYPTO_ALG_TYPE_DIGEST) {
-		ti->error = PFX "Expected digest algorithm for ESSIV hash";
+		ti->error = "Expected digest algorithm for ESSIV hash";
 		crypto_free_tfm(hash_tfm);
 		return -EINVAL;
 	}
@@ -145,7 +145,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 	saltsize = crypto_tfm_alg_digestsize(hash_tfm);
 	salt = kmalloc(saltsize, GFP_KERNEL);
 	if (salt == NULL) {
-		ti->error = PFX "Error kmallocing salt storage in ESSIV";
+		ti->error = "Error kmallocing salt storage in ESSIV";
 		crypto_free_tfm(hash_tfm);
 		return -ENOMEM;
 	}
@@ -159,20 +159,20 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 				       CRYPTO_TFM_MODE_ECB |
 				       CRYPTO_TFM_REQ_MAY_SLEEP);
 	if (essiv_tfm == NULL) {
-		ti->error = PFX "Error allocating crypto tfm for ESSIV";
+		ti->error = "Error allocating crypto tfm for ESSIV";
 		kfree(salt);
 		return -EINVAL;
 	}
 	if (crypto_tfm_alg_blocksize(essiv_tfm)
 	    != crypto_tfm_alg_ivsize(cc->tfm)) {
-		ti->error = PFX "Block size of ESSIV cipher does "
+		ti->error = "Block size of ESSIV cipher does "
 			    "not match IV size of block cipher";
 		crypto_free_tfm(essiv_tfm);
 		kfree(salt);
 		return -EINVAL;
 	}
 	if (crypto_cipher_setkey(essiv_tfm, salt, saltsize) < 0) {
-		ti->error = PFX "Failed to set key for ESSIV cipher";
+		ti->error = "Failed to set key for ESSIV cipher";
 		crypto_free_tfm(essiv_tfm);
 		kfree(salt);
 		return -EINVAL;
@@ -521,7 +521,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	unsigned long long tmpll;
 
 	if (argc != 5) {
-		ti->error = PFX "Not enough arguments";
+		ti->error = "Not enough arguments";
 		return -EINVAL;
 	}
 
@@ -532,21 +532,21 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	ivmode = strsep(&ivopts, ":");
 
 	if (tmp)
-		DMWARN(PFX "Unexpected additional cipher options");
+		DMWARN("Unexpected additional cipher options");
 
 	key_size = strlen(argv[1]) >> 1;
 
 	cc = kmalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
 	if (cc == NULL) {
 		ti->error =
-			PFX "Cannot allocate transparent encryption context";
+			"Cannot allocate transparent encryption context";
 		return -ENOMEM;
 	}
 
 	cc->key_size = key_size;
 	if ((!key_size && strcmp(argv[1], "-") != 0) ||
 	    (key_size && crypt_decode_key(cc->key, argv[1], key_size) < 0)) {
-		ti->error = PFX "Error decoding key";
+		ti->error = "Error decoding key";
 		goto bad1;
 	}
 
@@ -562,22 +562,22 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	else if (strcmp(chainmode, "ecb") == 0)
 		crypto_flags = CRYPTO_TFM_MODE_ECB;
 	else {
-		ti->error = PFX "Unknown chaining mode";
+		ti->error = "Unknown chaining mode";
 		goto bad1;
 	}
 
 	if (crypto_flags != CRYPTO_TFM_MODE_ECB && !ivmode) {
-		ti->error = PFX "This chaining mode requires an IV mechanism";
+		ti->error = "This chaining mode requires an IV mechanism";
 		goto bad1;
 	}
 
 	tfm = crypto_alloc_tfm(cipher, crypto_flags | CRYPTO_TFM_REQ_MAY_SLEEP);
 	if (!tfm) {
-		ti->error = PFX "Error allocating crypto tfm";
+		ti->error = "Error allocating crypto tfm";
 		goto bad1;
 	}
 	if (crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER) {
-		ti->error = PFX "Expected cipher algorithm";
+		ti->error = "Expected cipher algorithm";
 		goto bad2;
 	}
 
@@ -595,7 +595,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	else if (strcmp(ivmode, "essiv") == 0)
 		cc->iv_gen_ops = &crypt_iv_essiv_ops;
 	else {
-		ti->error = PFX "Invalid IV mode";
+		ti->error = "Invalid IV mode";
 		goto bad2;
 	}
 
@@ -610,7 +610,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	else {
 		cc->iv_size = 0;
 		if (cc->iv_gen_ops) {
-			DMWARN(PFX "Selected cipher does not support IVs");
+			DMWARN("Selected cipher does not support IVs");
 			if (cc->iv_gen_ops->dtr)
 				cc->iv_gen_ops->dtr(cc);
 			cc->iv_gen_ops = NULL;
@@ -619,36 +619,36 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
 	if (!cc->io_pool) {
-		ti->error = PFX "Cannot allocate crypt io mempool";
+		ti->error = "Cannot allocate crypt io mempool";
 		goto bad3;
 	}
 
 	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
 	if (!cc->page_pool) {
-		ti->error = PFX "Cannot allocate page mempool";
+		ti->error = "Cannot allocate page mempool";
 		goto bad4;
 	}
 
 	if (tfm->crt_cipher.cit_setkey(tfm, cc->key, key_size) < 0) {
-		ti->error = PFX "Error setting key";
+		ti->error = "Error setting key";
 		goto bad5;
 	}
 
 	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
-		ti->error = PFX "Invalid iv_offset sector";
+		ti->error = "Invalid iv_offset sector";
 		goto bad5;
 	}
 	cc->iv_offset = tmpll;
 
 	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
-		ti->error = PFX "Invalid device sector";
+		ti->error = "Invalid device sector";
 		goto bad5;
 	}
 	cc->start = tmpll;
 
 	if (dm_get_device(ti, argv[3], cc->start, ti->len,
 			  dm_table_get_mode(ti->table), &cc->dev)) {
-		ti->error = PFX "Device lookup failed";
+		ti->error = "Device lookup failed";
 		goto bad5;
 	}
 
@@ -657,7 +657,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 			*(ivopts - 1) = ':';
 		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
 		if (!cc->iv_mode) {
-			ti->error = PFX "Error kmallocing iv_mode string";
+			ti->error = "Error kmallocing iv_mode string";
 			goto bad5;
 		}
 		strcpy(cc->iv_mode, ivmode);
@@ -918,13 +918,13 @@ static int __init dm_crypt_init(void)
 	_kcryptd_workqueue = create_workqueue("kcryptd");
 	if (!_kcryptd_workqueue) {
 		r = -ENOMEM;
-		DMERR(PFX "couldn't create kcryptd");
+		DMERR("couldn't create kcryptd");
 		goto bad1;
 	}
 
 	r = dm_register_target(&crypt_target);
 	if (r < 0) {
-		DMERR(PFX "register failed %d", r);
+		DMERR("register failed %d", r);
 		goto bad2;
 	}
 
@@ -942,7 +942,7 @@ static void __exit dm_crypt_exit(void)
 	int r = dm_unregister_target(&crypt_target);
 
 	if (r < 0)
-		DMERR(PFX "unregister failed %d", r);
+		DMERR("unregister failed %d", r);
 
 	destroy_workqueue(_kcryptd_workqueue);
 	kmem_cache_destroy(_crypt_io_pool);