aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md/md.c
diff options
context:
space:
mode:
authorNeilBrown <neilb@suse.de>2014-12-14 20:56:56 -0500
committerNeilBrown <neilb@suse.de>2015-02-03 16:35:52 -0500
commit5c675f83c68fbdf9c0e103c1090b06be747fa62c (patch)
tree9a03f84c7a3bcef7d5e757dc28ce7bd5d205b26a /drivers/md/md.c
parent85572d7c75fd5b9fa3fc911e1c99c68ec74903a0 (diff)
md: make ->congested robust against personality changes.
There is currently no locking around calls to the 'congested' bdi function. If called at an awkward time while an array is being converted from one level (or personality) to another, there is a tiny chance of running code in an unreferenced module etc. So add a 'congested' function to the md_personality operations structure, and call it with appropriate locking from a central 'mddev_congested'. When the array personality is changing the array will be 'suspended' so no IO is processed. If mddev_congested detects this, it simply reports that the array is congested, which is a safe guess. As mddev_suspend calls synchronize_rcu(), mddev_congested can avoid races by included the whole call inside an rcu_read_lock() region. This require that the congested functions for all subordinate devices can be run under rcu_lock. Fortunately this is the case. Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md/md.c')
-rw-r--r--drivers/md/md.c22
1 files changed, 20 insertions, 2 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 17e7fd776034..d45f52edb314 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -321,9 +321,23 @@ EXPORT_SYMBOL_GPL(mddev_resume);
 
 int mddev_congested(struct mddev *mddev, int bits)
 {
-	return mddev->suspended;
+	struct md_personality *pers = mddev->pers;
+	int ret = 0;
+
+	rcu_read_lock();
+	if (mddev->suspended)
+		ret = 1;
+	else if (pers && pers->congested)
+		ret = pers->congested(mddev, bits);
+	rcu_read_unlock();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mddev_congested);
+static int md_congested(void *data, int bits)
+{
+	struct mddev *mddev = data;
+	return mddev_congested(mddev, bits);
 }
-EXPORT_SYMBOL(mddev_congested);
 
 /*
  * Generic flush handling for md
@@ -4908,6 +4922,10 @@ int md_run(struct mddev *mddev)
 		bitmap_destroy(mddev);
 		return err;
 	}
+	if (mddev->queue) {
+		mddev->queue->backing_dev_info.congested_data = mddev;
+		mddev->queue->backing_dev_info.congested_fn = md_congested;
+	}
 	if (mddev->pers->sync_request) {
 		if (mddev->kobj.sd &&
 		    sysfs_create_group(&mddev->kobj, &md_redundancy_group))