about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-01-19 18:16:49 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2018-01-19 18:16:49 -0500
commit1cf55613a6a95a8f6c2ad8565ef92ef97b2af2fd (patch)
tree5ef3b87deeda26056b7b2e1ccdcc0cb0b16fd984
parentec835f8104a21f4d4eeb9d316ee71d2b4a7f00de (diff)
parent3cc2e57c4beabcbbaa46e1ac6d77ca8276a4a42d (diff)
Merge tag 'for-4.15/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper fixes from Mike Snitzer:
 "All fixes marked for stable:

  - Fix DM thinp btree corruption seen when inserting a new key/value
    pair into a full root node.

  - Fix DM thinp btree removal deadlock due to artificially low number
    of allowed concurrent locks.

  - Fix possible DM crypt corruption if kernel keyring service is used.
    Only affects ciphers using following IVs: essiv, lmk and tcw.

  - Two DM crypt device initialization error checking fixes.

  - Fix DM integrity to allow use of async ciphers that require DMA"

* tag 'for-4.15/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm crypt: fix error return code in crypt_ctr()
  dm crypt: wipe kernel key copy after IV initialization
  dm integrity: don't store cipher request on the stack
  dm crypt: fix crash by adding missing check for auth key size
  dm btree: fix serious bug in btree_split_beneath()
  dm thin metadata: THIN_MAX_CONCURRENT_LOCKS should be 6
-rw-r--r--drivers/md/dm-crypt.c20
-rw-r--r--drivers/md/dm-integrity.c49
-rw-r--r--drivers/md/dm-thin-metadata.c6
-rw-r--r--drivers/md/persistent-data/dm-btree.c19
4 files changed, 59 insertions, 35 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 9fc12f556534..554d60394c06 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1954,10 +1954,15 @@ static int crypt_setkey(struct crypt_config *cc)
1954 /* Ignore extra keys (which are used for IV etc) */ 1954 /* Ignore extra keys (which are used for IV etc) */
1955 subkey_size = crypt_subkey_size(cc); 1955 subkey_size = crypt_subkey_size(cc);
1956 1956
1957 if (crypt_integrity_hmac(cc)) 1957 if (crypt_integrity_hmac(cc)) {
1958 if (subkey_size < cc->key_mac_size)
1959 return -EINVAL;
1960
1958 crypt_copy_authenckey(cc->authenc_key, cc->key, 1961 crypt_copy_authenckey(cc->authenc_key, cc->key,
1959 subkey_size - cc->key_mac_size, 1962 subkey_size - cc->key_mac_size,
1960 cc->key_mac_size); 1963 cc->key_mac_size);
1964 }
1965
1961 for (i = 0; i < cc->tfms_count; i++) { 1966 for (i = 0; i < cc->tfms_count; i++) {
1962 if (crypt_integrity_hmac(cc)) 1967 if (crypt_integrity_hmac(cc))
1963 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i], 1968 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
@@ -2053,9 +2058,6 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
2053 2058
2054 ret = crypt_setkey(cc); 2059 ret = crypt_setkey(cc);
2055 2060
2056 /* wipe the kernel key payload copy in each case */
2057 memset(cc->key, 0, cc->key_size * sizeof(u8));
2058
2059 if (!ret) { 2061 if (!ret) {
2060 set_bit(DM_CRYPT_KEY_VALID, &cc->flags); 2062 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2061 kzfree(cc->key_string); 2063 kzfree(cc->key_string);
@@ -2523,6 +2525,10 @@ static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
2523 } 2525 }
2524 } 2526 }
2525 2527
2528 /* wipe the kernel key payload copy */
2529 if (cc->key_string)
2530 memset(cc->key, 0, cc->key_size * sizeof(u8));
2531
2526 return ret; 2532 return ret;
2527} 2533}
2528 2534
@@ -2740,6 +2746,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2740 cc->tag_pool_max_sectors * cc->on_disk_tag_size); 2746 cc->tag_pool_max_sectors * cc->on_disk_tag_size);
2741 if (!cc->tag_pool) { 2747 if (!cc->tag_pool) {
2742 ti->error = "Cannot allocate integrity tags mempool"; 2748 ti->error = "Cannot allocate integrity tags mempool";
2749 ret = -ENOMEM;
2743 goto bad; 2750 goto bad;
2744 } 2751 }
2745 2752
@@ -2961,6 +2968,9 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
2961 return ret; 2968 return ret;
2962 if (cc->iv_gen_ops && cc->iv_gen_ops->init) 2969 if (cc->iv_gen_ops && cc->iv_gen_ops->init)
2963 ret = cc->iv_gen_ops->init(cc); 2970 ret = cc->iv_gen_ops->init(cc);
2971 /* wipe the kernel key payload copy */
2972 if (cc->key_string)
2973 memset(cc->key, 0, cc->key_size * sizeof(u8));
2964 return ret; 2974 return ret;
2965 } 2975 }
2966 if (argc == 2 && !strcasecmp(argv[1], "wipe")) { 2976 if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
@@ -3007,7 +3017,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
3007 3017
3008static struct target_type crypt_target = { 3018static struct target_type crypt_target = {
3009 .name = "crypt", 3019 .name = "crypt",
3010 .version = {1, 18, 0}, 3020 .version = {1, 18, 1},
3011 .module = THIS_MODULE, 3021 .module = THIS_MODULE,
3012 .ctr = crypt_ctr, 3022 .ctr = crypt_ctr,
3013 .dtr = crypt_dtr, 3023 .dtr = crypt_dtr,
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 05c7bfd0c9d9..46d7c8749222 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2559,7 +2559,8 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2559 int r = 0; 2559 int r = 0;
2560 unsigned i; 2560 unsigned i;
2561 __u64 journal_pages, journal_desc_size, journal_tree_size; 2561 __u64 journal_pages, journal_desc_size, journal_tree_size;
2562 unsigned char *crypt_data = NULL; 2562 unsigned char *crypt_data = NULL, *crypt_iv = NULL;
2563 struct skcipher_request *req = NULL;
2563 2564
2564 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL); 2565 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
2565 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL); 2566 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
@@ -2617,9 +2618,20 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2617 2618
2618 if (blocksize == 1) { 2619 if (blocksize == 1) {
2619 struct scatterlist *sg; 2620 struct scatterlist *sg;
2620 SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt); 2621
2621 unsigned char iv[ivsize]; 2622 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
2622 skcipher_request_set_tfm(req, ic->journal_crypt); 2623 if (!req) {
2624 *error = "Could not allocate crypt request";
2625 r = -ENOMEM;
2626 goto bad;
2627 }
2628
2629 crypt_iv = kmalloc(ivsize, GFP_KERNEL);
2630 if (!crypt_iv) {
2631 *error = "Could not allocate iv";
2632 r = -ENOMEM;
2633 goto bad;
2634 }
2623 2635
2624 ic->journal_xor = dm_integrity_alloc_page_list(ic); 2636 ic->journal_xor = dm_integrity_alloc_page_list(ic);
2625 if (!ic->journal_xor) { 2637 if (!ic->journal_xor) {
@@ -2641,9 +2653,9 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2641 sg_set_buf(&sg[i], va, PAGE_SIZE); 2653 sg_set_buf(&sg[i], va, PAGE_SIZE);
2642 } 2654 }
2643 sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids); 2655 sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
2644 memset(iv, 0x00, ivsize); 2656 memset(crypt_iv, 0x00, ivsize);
2645 2657
2646 skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv); 2658 skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
2647 init_completion(&comp.comp); 2659 init_completion(&comp.comp);
2648 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 2660 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2649 if (do_crypt(true, req, &comp)) 2661 if (do_crypt(true, req, &comp))
@@ -2659,10 +2671,22 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2659 crypto_free_skcipher(ic->journal_crypt); 2671 crypto_free_skcipher(ic->journal_crypt);
2660 ic->journal_crypt = NULL; 2672 ic->journal_crypt = NULL;
2661 } else { 2673 } else {
2662 SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
2663 unsigned char iv[ivsize];
2664 unsigned crypt_len = roundup(ivsize, blocksize); 2674 unsigned crypt_len = roundup(ivsize, blocksize);
2665 2675
2676 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
2677 if (!req) {
2678 *error = "Could not allocate crypt request";
2679 r = -ENOMEM;
2680 goto bad;
2681 }
2682
2683 crypt_iv = kmalloc(ivsize, GFP_KERNEL);
2684 if (!crypt_iv) {
2685 *error = "Could not allocate iv";
2686 r = -ENOMEM;
2687 goto bad;
2688 }
2689
2666 crypt_data = kmalloc(crypt_len, GFP_KERNEL); 2690 crypt_data = kmalloc(crypt_len, GFP_KERNEL);
2667 if (!crypt_data) { 2691 if (!crypt_data) {
2668 *error = "Unable to allocate crypt data"; 2692 *error = "Unable to allocate crypt data";
@@ -2670,8 +2694,6 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2670 goto bad; 2694 goto bad;
2671 } 2695 }
2672 2696
2673 skcipher_request_set_tfm(req, ic->journal_crypt);
2674
2675 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal); 2697 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
2676 if (!ic->journal_scatterlist) { 2698 if (!ic->journal_scatterlist) {
2677 *error = "Unable to allocate sg list"; 2699 *error = "Unable to allocate sg list";
@@ -2695,12 +2717,12 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2695 struct skcipher_request *section_req; 2717 struct skcipher_request *section_req;
2696 __u32 section_le = cpu_to_le32(i); 2718 __u32 section_le = cpu_to_le32(i);
2697 2719
2698 memset(iv, 0x00, ivsize); 2720 memset(crypt_iv, 0x00, ivsize);
2699 memset(crypt_data, 0x00, crypt_len); 2721 memset(crypt_data, 0x00, crypt_len);
2700 memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le))); 2722 memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
2701 2723
2702 sg_init_one(&sg, crypt_data, crypt_len); 2724 sg_init_one(&sg, crypt_data, crypt_len);
2703 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv); 2725 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
2704 init_completion(&comp.comp); 2726 init_completion(&comp.comp);
2705 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 2727 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2706 if (do_crypt(true, req, &comp)) 2728 if (do_crypt(true, req, &comp))
@@ -2758,6 +2780,9 @@ retest_commit_id:
2758 } 2780 }
2759bad: 2781bad:
2760 kfree(crypt_data); 2782 kfree(crypt_data);
2783 kfree(crypt_iv);
2784 skcipher_request_free(req);
2785
2761 return r; 2786 return r;
2762} 2787}
2763 2788
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index d31d18d9727c..36ef284ad086 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -80,10 +80,14 @@
80#define SECTOR_TO_BLOCK_SHIFT 3 80#define SECTOR_TO_BLOCK_SHIFT 3
81 81
82/* 82/*
83 * For btree insert:
83 * 3 for btree insert + 84 * 3 for btree insert +
84 * 2 for btree lookup used within space map 85 * 2 for btree lookup used within space map
86 * For btree remove:
87 * 2 for shadow spine +
88 * 4 for rebalance 3 child node
85 */ 89 */
86#define THIN_MAX_CONCURRENT_LOCKS 5 90#define THIN_MAX_CONCURRENT_LOCKS 6
87 91
88/* This should be plenty */ 92/* This should be plenty */
89#define SPACE_MAP_ROOT_SIZE 128 93#define SPACE_MAP_ROOT_SIZE 128
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index f21ce6a3d4cf..58b319757b1e 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -683,23 +683,8 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
683 pn->keys[1] = rn->keys[0]; 683 pn->keys[1] = rn->keys[0];
684 memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64)); 684 memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));
685 685
686 /* 686 unlock_block(s->info, left);
687 * rejig the spine. This is ugly, since it knows too 687 unlock_block(s->info, right);
688 * much about the spine
689 */
690 if (s->nodes[0] != new_parent) {
691 unlock_block(s->info, s->nodes[0]);
692 s->nodes[0] = new_parent;
693 }
694 if (key < le64_to_cpu(rn->keys[0])) {
695 unlock_block(s->info, right);
696 s->nodes[1] = left;
697 } else {
698 unlock_block(s->info, left);
699 s->nodes[1] = right;
700 }
701 s->count = 2;
702
703 return 0; 688 return 0;
704} 689}
705 690