summaryrefslogtreecommitdiffstats
path: root/drivers/md/raid5.c
diff options
context:
space:
mode:
authorNeilBrown <neilb@suse.de>2014-12-14 20:56:56 -0500
committerNeilBrown <neilb@suse.de>2015-02-03 16:35:52 -0500
commit5c675f83c68fbdf9c0e103c1090b06be747fa62c (patch)
tree9a03f84c7a3bcef7d5e757dc28ce7bd5d205b26a /drivers/md/raid5.c
parent85572d7c75fd5b9fa3fc911e1c99c68ec74903a0 (diff)
md: make ->congested robust against personality changes.
There is currently no locking around calls to the 'congested' bdi function. If called at an awkward time while an array is being converted from one level (or personality) to another, there is a tiny chance of running code in an unreferenced module etc. So add a 'congested' function to the md_personality operations structure, and call it with appropriate locking from a central 'mddev_congested'. When the array personality is changing the array will be 'suspended' so no IO is processed. If mddev_congested detects this, it simply reports that the array is congested, which is a safe guess. As mddev_suspend calls synchronize_rcu(), mddev_congested can avoid races by included the whole call inside an rcu_read_lock() region. This require that the congested functions for all subordinate devices can be run under rcu_lock. Fortunately this is the case. Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--drivers/md/raid5.c19
1 file changed, 4 insertions, 15 deletions
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a03cf2d889bf..502a908149c6 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4149,7 +4149,7 @@ static void activate_bit_delay(struct r5conf *conf,
4149 } 4149 }
4150} 4150}
4151 4151
4152int md_raid5_congested(struct mddev *mddev, int bits) 4152static int raid5_congested(struct mddev *mddev, int bits)
4153{ 4153{
4154 struct r5conf *conf = mddev->private; 4154 struct r5conf *conf = mddev->private;
4155 4155
@@ -4166,15 +4166,6 @@ int md_raid5_congested(struct mddev *mddev, int bits)
4166 4166
4167 return 0; 4167 return 0;
4168} 4168}
4169EXPORT_SYMBOL_GPL(md_raid5_congested);
4170
4171static int raid5_congested(void *data, int bits)
4172{
4173 struct mddev *mddev = data;
4174
4175 return mddev_congested(mddev, bits) ||
4176 md_raid5_congested(mddev, bits);
4177}
4178 4169
4179/* We want read requests to align with chunks where possible, 4170/* We want read requests to align with chunks where possible,
4180 * but write requests don't need to. 4171 * but write requests don't need to.
@@ -6248,9 +6239,6 @@ static int run(struct mddev *mddev)
6248 6239
6249 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); 6240 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
6250 6241
6251 mddev->queue->backing_dev_info.congested_data = mddev;
6252 mddev->queue->backing_dev_info.congested_fn = raid5_congested;
6253
6254 chunk_size = mddev->chunk_sectors << 9; 6242 chunk_size = mddev->chunk_sectors << 9;
6255 blk_queue_io_min(mddev->queue, chunk_size); 6243 blk_queue_io_min(mddev->queue, chunk_size);
6256 blk_queue_io_opt(mddev->queue, chunk_size * 6244 blk_queue_io_opt(mddev->queue, chunk_size *
@@ -6333,8 +6321,6 @@ static int stop(struct mddev *mddev)
6333 struct r5conf *conf = mddev->private; 6321 struct r5conf *conf = mddev->private;
6334 6322
6335 md_unregister_thread(&mddev->thread); 6323 md_unregister_thread(&mddev->thread);
6336 if (mddev->queue)
6337 mddev->queue->backing_dev_info.congested_fn = NULL;
6338 free_conf(conf); 6324 free_conf(conf);
6339 mddev->private = NULL; 6325 mddev->private = NULL;
6340 mddev->to_remove = &raid5_attrs_group; 6326 mddev->to_remove = &raid5_attrs_group;
@@ -7126,6 +7112,7 @@ static struct md_personality raid6_personality =
7126 .finish_reshape = raid5_finish_reshape, 7112 .finish_reshape = raid5_finish_reshape,
7127 .quiesce = raid5_quiesce, 7113 .quiesce = raid5_quiesce,
7128 .takeover = raid6_takeover, 7114 .takeover = raid6_takeover,
7115 .congested = raid5_congested,
7129}; 7116};
7130static struct md_personality raid5_personality = 7117static struct md_personality raid5_personality =
7131{ 7118{
@@ -7148,6 +7135,7 @@ static struct md_personality raid5_personality =
7148 .finish_reshape = raid5_finish_reshape, 7135 .finish_reshape = raid5_finish_reshape,
7149 .quiesce = raid5_quiesce, 7136 .quiesce = raid5_quiesce,
7150 .takeover = raid5_takeover, 7137 .takeover = raid5_takeover,
7138 .congested = raid5_congested,
7151}; 7139};
7152 7140
7153static struct md_personality raid4_personality = 7141static struct md_personality raid4_personality =
@@ -7171,6 +7159,7 @@ static struct md_personality raid4_personality =
7171 .finish_reshape = raid5_finish_reshape, 7159 .finish_reshape = raid5_finish_reshape,
7172 .quiesce = raid5_quiesce, 7160 .quiesce = raid5_quiesce,
7173 .takeover = raid4_takeover, 7161 .takeover = raid4_takeover,
7162 .congested = raid5_congested,
7174}; 7163};
7175 7164
7176static int __init raid5_init(void) 7165static int __init raid5_init(void)