author    Milan Broz <mbroz@redhat.com>  2007-12-13 09:16:10 -0500
committer Alasdair G Kergon <agk@redhat.com>  2007-12-20 12:32:13 -0500
commit    91e106259214b40e992a58fb9417da46868e19b2 (patch)
tree      d40a2f0aa45427a66d024ce1b29ea7cb24cc770e /drivers/md/dm-crypt.c
parent    91212507f93778c09d4c1335207b6f4b995f5ad1 (diff)
dm crypt: use bio_add_page
Fix a possible max_phys_segments violation in a cloned dm-crypt bio.

For a write operation, dm-crypt needs to allocate a new bio request and run the crypto operation on this clone. The cloned request always has the same size, but the number of physical segments can increase and violate the max_phys_segments restriction.

This can lead to data corruption and serious hardware malfunction. It was recently observed when using XFS over dm-crypt with at least two HBA controller drivers (arcmsr, cciss).

Fix it by using the bio_add_page() call (which tests for the other restrictions too) instead of constructing our own biovec.

All versions of dm-crypt are affected by this bug.

Cc: stable@kernel.org
Cc: dm-crypt@saout.de
Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
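To see why a clone of the same total size can still break a driver limit, here is a toy userspace C illustration, not kernel code: struct page_ref, count_phys_segments() and the pfn values are all invented for the example. It models a "physical segment" as a maximal run of physically contiguous pages, which is the quantity that max_phys_segments bounds.

#include <stdio.h>

/* Toy model: each page is identified by a page frame number (pfn). */
struct page_ref {
	unsigned long pfn;
};

/* A new segment starts wherever a page is not contiguous with the
 * previous one. */
static int count_phys_segments(const struct page_ref *v, int n)
{
	int segs = 0;

	for (int i = 0; i < n; i++)
		if (i == 0 || v[i].pfn != v[i - 1].pfn + 1)
			segs++;
	return segs;
}

int main(void)
{
	/* Original bio: four contiguous pages -> one physical segment. */
	struct page_ref orig[4]  = { {100}, {101}, {102}, {103} };
	/* Cloned bio of the same size, but its pages were allocated one
	 * by one from a mempool and are scattered -> four segments. */
	struct page_ref clone[4] = { {100}, {513}, {77}, {2048} };

	printf("original bio: %d segment(s)\n", count_phys_segments(orig, 4));
	printf("cloned bio:   %d segment(s)\n", count_phys_segments(clone, 4));

	/* If the device's max_phys_segments were, say, 2, the clone would
	 * now violate a limit the original bio satisfied.  bio_add_page()
	 * performs this kind of limit checking while the bio is built,
	 * which the old hand-rolled biovec code skipped. */
	return 0;
}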
Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--  drivers/md/dm-crypt.c | 24 +++++++++++-------------
1 file changed, 11 insertions(+), 13 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 30d51a0c0116..6b66ee46b87d 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -398,7 +398,8 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 	struct bio *clone;
 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
-	unsigned int i;
+	unsigned i, len;
+	struct page *page;
 
 	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
 	if (!clone)
@@ -407,10 +408,8 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 	clone_init(io, clone);
 
 	for (i = 0; i < nr_iovecs; i++) {
-		struct bio_vec *bv = bio_iovec_idx(clone, i);
-
-		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
-		if (!bv->bv_page)
+		page = mempool_alloc(cc->page_pool, gfp_mask);
+		if (!page)
 			break;
 
 		/*
@@ -421,15 +420,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 		if (i == (MIN_BIO_PAGES - 1))
 			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
 
-		bv->bv_offset = 0;
-		if (size > PAGE_SIZE)
-			bv->bv_len = PAGE_SIZE;
-		else
-			bv->bv_len = size;
+		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
+
+		if (!bio_add_page(clone, page, len, 0)) {
+			mempool_free(page, cc->page_pool);
+			break;
+		}
 
-		clone->bi_size += bv->bv_len;
-		clone->bi_vcnt++;
-		size -= bv->bv_len;
+		size -= len;
 	}
 
 	if (!clone->bi_size) {
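A note on the error handling above: bio_add_page() returns the number of bytes it managed to add, so a return of 0 means the page could not be accepted (for instance, because a queue restriction would be exceeded). The loop then releases the page back to the mempool and stops, and a completely empty clone is caught by the existing !clone->bi_size check.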