author     Linus Torvalds <torvalds@linux-foundation.org>  2014-08-29 14:49:10 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-08-29 14:49:10 -0400
commit     ef13c8afa67518e1d173a6f3b95dd02559879421 (patch)
tree       0c81aacb11e8d2ddc444d466fbab1e63b14f7239
parent     522a15db959f934ac096673e0c4600db0af5b337 (diff)
parent     d49ec52ff6ddcda178fc2476a109cf1bd1fa19ed (diff)
Merge tag 'dm-3.17-fix' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper fix from Mike Snitzer:
 "Fix a 3.17-rc1 regression introduced by switching the DM crypt target
  to using per-bio data"

* tag 'dm-3.17-fix' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm crypt: fix access beyond the end of allocated space
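The fix below packs the ablkcipher request, the struct dm_crypt_request and the IV into a single per-request area, and the padding inserted before the IV is what keeps the IV on the cipher's alignment boundary. A minimal standalone sketch of that arithmetic, with made-up sizes standing in for crypto_ablkcipher_reqsize() and crypto_ablkcipher_alignmask() (an illustration only, not the driver code):

	#include <stddef.h>
	#include <stdio.h>

	/* Placeholder only; the real struct dm_crypt_request lives in dm-crypt.c. */
	struct dm_crypt_request { void *ctx; };

	int main(void)
	{
		size_t dmreq_start = 96;   /* hypothetical: request struct + reqsize, already ALIGNed */
		size_t alignmask   = 15;   /* hypothetical cipher alignmask (16-byte IV alignment)    */
		size_t iv_size     = 16;

		/*
		 * -(offset) & alignmask is the distance from 'offset' to the next
		 * (alignmask + 1)-aligned boundary; it is never larger than alignmask.
		 */
		size_t iv_size_padding =
			-(dmreq_start + sizeof(struct dm_crypt_request)) & alignmask;

		printf("iv_size_padding = %zu, per-request size = %zu\n",
		       iv_size_padding,
		       dmreq_start + sizeof(struct dm_crypt_request) + iv_size_padding + iv_size);
		return 0;
	}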
 drivers/md/dm-crypt.c | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 2785007e0e46..cd15e0801228 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1688,6 +1688,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	unsigned int key_size, opt_params;
 	unsigned long long tmpll;
 	int ret;
+	size_t iv_size_padding;
 	struct dm_arg_set as;
 	const char *opt_string;
 	char dummy;
@@ -1724,20 +1725,32 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	cc->dmreq_start = sizeof(struct ablkcipher_request);
 	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
-	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
-	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
-			   ~(crypto_tfm_ctx_alignment() - 1);
+	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
+
+	if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
+		/* Allocate the padding exactly */
+		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
+				& crypto_ablkcipher_alignmask(any_tfm(cc));
+	} else {
+		/*
+		 * If the cipher requires greater alignment than kmalloc
+		 * alignment, we don't know the exact position of the
+		 * initialization vector. We must assume worst case.
+		 */
+		iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
+	}
 
 	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
-			sizeof(struct dm_crypt_request) + cc->iv_size);
+			sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
 	if (!cc->req_pool) {
 		ti->error = "Cannot allocate crypt request mempool";
 		goto bad;
 	}
 
 	cc->per_bio_data_size = ti->per_bio_data_size =
-		sizeof(struct dm_crypt_io) + cc->dmreq_start +
-		sizeof(struct dm_crypt_request) + cc->iv_size;
+		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
+		      sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
+		      ARCH_KMALLOC_MINALIGN);
 
 	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
 	if (!cc->page_pool) {
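The per_bio_data_size rounding at the end appears to serve the same goal: when the request area is carved out of front-padded per-bio data rather than taken from the kmalloc mempool, rounding the total up to ARCH_KMALLOC_MINALIGN keeps each per-bio region starting on a kmalloc-aligned offset, so the exact-padding branch above stays valid there too. A hedged illustration of that rounding, reproducing the power-of-two round-up the kernel's ALIGN() macro performs (the sizes are invented):

	#include <stddef.h>
	#include <stdio.h>

	/* Same power-of-two round-up as the kernel's ALIGN() macro. */
	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

	int main(void)
	{
		size_t raw_size = 473;	/* hypothetical unaligned per-bio data size */
		size_t minalign = 8;	/* hypothetical ARCH_KMALLOC_MINALIGN value */

		printf("per_bio_data_size = %zu\n", ALIGN_UP(raw_size, minalign));
		return 0;
	}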