Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--	drivers/md/raid5.c	19
1 file changed, 4 insertions(+), 15 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index a03cf2d889bf..502a908149c6 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4149,7 +4149,7 @@ static void activate_bit_delay(struct r5conf *conf,
 	}
 }
 
-int md_raid5_congested(struct mddev *mddev, int bits)
+static int raid5_congested(struct mddev *mddev, int bits)
 {
 	struct r5conf *conf = mddev->private;
 
@@ -4166,15 +4166,6 @@ int md_raid5_congested(struct mddev *mddev, int bits)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(md_raid5_congested);
-
-static int raid5_congested(void *data, int bits)
-{
-	struct mddev *mddev = data;
-
-	return mddev_congested(mddev, bits) ||
-		md_raid5_congested(mddev, bits);
-}
 
 /* We want read requests to align with chunks where possible,
  * but write requests don't need to.
@@ -6248,9 +6239,6 @@ static int run(struct mddev *mddev)
 
 		blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
 
-		mddev->queue->backing_dev_info.congested_data = mddev;
-		mddev->queue->backing_dev_info.congested_fn = raid5_congested;
-
 		chunk_size = mddev->chunk_sectors << 9;
 		blk_queue_io_min(mddev->queue, chunk_size);
 		blk_queue_io_opt(mddev->queue, chunk_size *
@@ -6333,8 +6321,6 @@ static int stop(struct mddev *mddev)
 	struct r5conf *conf = mddev->private;
 
 	md_unregister_thread(&mddev->thread);
-	if (mddev->queue)
-		mddev->queue->backing_dev_info.congested_fn = NULL;
 	free_conf(conf);
 	mddev->private = NULL;
 	mddev->to_remove = &raid5_attrs_group;
@@ -7126,6 +7112,7 @@ static struct md_personality raid6_personality =
 	.finish_reshape = raid5_finish_reshape,
 	.quiesce	= raid5_quiesce,
 	.takeover	= raid6_takeover,
+	.congested	= raid5_congested,
 };
 static struct md_personality raid5_personality =
 {
@@ -7148,6 +7135,7 @@ static struct md_personality raid5_personality =
 	.finish_reshape = raid5_finish_reshape,
 	.quiesce	= raid5_quiesce,
 	.takeover	= raid5_takeover,
+	.congested	= raid5_congested,
 };
 
 static struct md_personality raid4_personality =
@@ -7171,6 +7159,7 @@ static struct md_personality raid4_personality =
 	.finish_reshape = raid5_finish_reshape,
 	.quiesce	= raid5_quiesce,
 	.takeover	= raid4_takeover,
+	.congested	= raid5_congested,
 };
 
 static int __init raid5_init(void)
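
For context: with this change the raid5/raid6/raid4 personalities no longer register their own backing_dev_info.congested_fn; instead they export a ->congested method in struct md_personality. Below is a minimal illustrative sketch, not part of this diff, of how the md core side might dispatch the block layer's congestion callback to that hook. The helper names mddev_congested()/md_congested() and the exact locking are assumptions for illustration only.

/*
 * Sketch only (core-side counterpart assumed, not shown in this diff):
 * route the queue's congested_fn to the personality's ->congested hook.
 */
static int mddev_congested(struct mddev *mddev, int bits)
{
	struct md_personality *pers = mddev->pers;
	int ret = 0;

	rcu_read_lock();
	if (mddev->suspended)
		ret = 1;			/* suspended array reports congested */
	else if (pers && pers->congested)
		ret = pers->congested(mddev, bits);
	rcu_read_unlock();

	return ret;
}

static int md_congested(void *data, int bits)
{
	struct mddev *mddev = data;

	/* assumed to be registered once by the core as congested_fn */
	return mddev_congested(mddev, bits);
}

This keeps the congestion check robust across personality changes, since the core looks up mddev->pers on every call instead of leaving a stale function pointer in the queue.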