author    Konstantin Khlebnikov <khlebnikov@yandex-team.ru>    2016-11-27 11:32:32 -0500
committer Shaohua Li <shli@fb.com>    2016-11-29 18:53:21 -0500
commit    e8d7c33232e5fdfa761c3416539bc5b4acd12db5 (patch)
tree      df0f691267ac01dfd9d2c52cba92a13354cab646
parent    1a0ec5c30c37d29e4435a45e75c896f91af970bd (diff)
md/raid5: limit request size according to implementation limits
The current implementation uses a 16-bit counter of active stripes in the lower bits of bio->bi_phys_segments. If a request is big enough to overflow this counter, the bio is completed and freed too early.

Fortunately, this does not happen in the default configuration, because several other limits prevent it: stripe_cache_size * nr_disks effectively bounds the number of active stripes, and the small max_sectors_kb of the underlying disks prevents it during normal read/write operations. The overflow does happen easily on discard if it is enabled via the module parameter "devices_handle_discard_safely" and stripe_cache_size is set large enough.

This patch limits the request size to 256 MiB - 8 KiB to prevent such overflows.

Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Shaohua Li <shli@kernel.org>
Cc: Neil Brown <neilb@suse.com>
Cc: stable@vger.kernel.org
Signed-off-by: Shaohua Li <shli@fb.com>
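The 256 MiB - 8 KiB figure follows directly from the 16-bit counter: 0xfffe stripes of STRIPE_SECTORS sectors each. The stand-alone C sketch below is only an illustration of that arithmetic (it is not kernel code) and assumes STRIPE_SECTORS == 8, i.e. the default 4 KiB stripe size over 512-byte sectors.

/*
 * Sanity check of the new cap: 0xfffe stripes * 8 sectors * 512 bytes
 * should equal 256 MiB minus 8 KiB, which is the largest request that
 * keeps the 16-bit active-stripe counter in bi_phys_segments from
 * wrapping.
 */
#include <stdio.h>

#define STRIPE_SECTORS 8UL   /* assumed: 4096-byte stripe / 512-byte sector */
#define SECTOR_SIZE    512UL

int main(void)
{
        unsigned long max_sectors = 0xfffeUL * STRIPE_SECTORS;
        unsigned long max_bytes   = max_sectors * SECTOR_SIZE;
        unsigned long expected    = 256UL * 1024 * 1024 - 8UL * 1024;

        printf("cap: %lu sectors = %lu bytes (256 MiB - 8 KiB = %lu)\n",
               max_sectors, max_bytes, expected);
        return 0;
}

Running this prints 524272 sectors = 268427264 bytes, matching 256 MiB - 8 KiB, which is why the patch passes 0xfffe * STRIPE_SECTORS to both queue limits below.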
drivers/md/raid5.c | 9 +++++++++
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index db909b9e37df..6bf3c2604a2f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7108,6 +7108,15 @@ static int raid5_run(struct mddev *mddev)
 		stripe = (stripe | (stripe-1)) + 1;
 		mddev->queue->limits.discard_alignment = stripe;
 		mddev->queue->limits.discard_granularity = stripe;
+
+		/*
+		 * We use 16-bit counter of active stripes in bi_phys_segments
+		 * (minus one for over-loaded initialization)
+		 */
+		blk_queue_max_hw_sectors(mddev->queue, 0xfffe * STRIPE_SECTORS);
+		blk_queue_max_discard_sectors(mddev->queue,
+					      0xfffe * STRIPE_SECTORS);
+
 		/*
 		 * unaligned part of discard request will be ignored, so can't
 		 * guarantee discard_zeroes_data