author		NeilBrown <neilb@suse.de>	2014-12-14 20:56:56 -0500
committer	NeilBrown <neilb@suse.de>	2015-02-03 16:35:52 -0500
commit		5c675f83c68fbdf9c0e103c1090b06be747fa62c
tree		9a03f84c7a3bcef7d5e757dc28ce7bd5d205b26a /drivers/md/raid1.c
parent		85572d7c75fd5b9fa3fc911e1c99c68ec74903a0
md: make ->congested robust against personality changes.
There is currently no locking around calls to the 'congested' bdi
function. If called at an awkward time while an array is being
converted from one level (or personality) to another, there is a
tiny chance of running code in an unreferenced module etc.

So add a 'congested' function to the md_personality operations
structure, and call it with appropriate locking from a central
'mddev_congested'.

When the array personality is changing, the array will be 'suspended'
so no IO is processed. If mddev_congested detects this, it simply
reports that the array is congested, which is a safe guess.

As mddev_suspend calls synchronize_rcu(), mddev_congested can avoid
races by including the whole call inside an rcu_read_lock() region.
This requires that the congested functions for all subordinate
devices can be run under rcu_lock. Fortunately this is the case.

Signed-off-by: NeilBrown <neilb@suse.de>
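For illustration, below is a minimal sketch of the central 'mddev_congested'
helper the message describes. This is an editor's sketch of the md.c side,
which lies outside the raid1.c diffstat shown here; the exact field and
callback names (mddev->suspended, pers->congested) are assumed from the
description above rather than quoted from the commit.

/*
 * Sketch (assumed shape, not the literal md.c hunk from this commit):
 * a central congested check that is safe across personality changes.
 */
static int mddev_congested(struct mddev *mddev, int bits)
{
	struct md_personality *pers = mddev->pers;
	int ret = 0;

	/*
	 * mddev_suspend() calls synchronize_rcu(), so anything running
	 * inside this rcu_read_lock() region cannot race with a
	 * personality being torn down.
	 */
	rcu_read_lock();
	if (mddev->suspended)
		ret = 1;	/* suspended: report congested, a safe guess */
	else if (pers && pers->congested)
		ret = pers->congested(mddev, bits);
	rcu_read_unlock();

	return ret;
}

With the suspension check handled centrally like this, raid1's own wrapper
around mddev_congested() becomes redundant, which is why the
raid1_congested(void *data, ...) wrapper is deleted in the diff below and
the remaining function is wired up through the new .congested personality
operation.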
Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r--	drivers/md/raid1.c	14
1 file changed, 2 insertions, 12 deletions
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 40b35be34f8d..9ad7ce7091be 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -734,7 +734,7 @@ static int raid1_mergeable_bvec(struct request_queue *q,
 
 }
 
-int md_raid1_congested(struct mddev *mddev, int bits)
+static int raid1_congested(struct mddev *mddev, int bits)
 {
 	struct r1conf *conf = mddev->private;
 	int i, ret = 0;
@@ -763,15 +763,6 @@ int md_raid1_congested(struct mddev *mddev, int bits)
 	rcu_read_unlock();
 	return ret;
 }
-EXPORT_SYMBOL_GPL(md_raid1_congested);
-
-static int raid1_congested(void *data, int bits)
-{
-	struct mddev *mddev = data;
-
-	return mddev_congested(mddev, bits) ||
-		md_raid1_congested(mddev, bits);
-}
 
 static void flush_pending_writes(struct r1conf *conf)
 {
@@ -2955,8 +2946,6 @@ static int run(struct mddev *mddev)
 	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
 
 	if (mddev->queue) {
-		mddev->queue->backing_dev_info.congested_fn = raid1_congested;
-		mddev->queue->backing_dev_info.congested_data = mddev;
 		blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);
 
 		if (discard_supported)
@@ -3193,6 +3182,7 @@ static struct md_personality raid1_personality =
 	.check_reshape	= raid1_reshape,
 	.quiesce	= raid1_quiesce,
 	.takeover	= raid1_takeover,
+	.congested	= raid1_congested,
 };
 
 static int __init raid_init(void)