about summary refs log tree commit diff stats
path: root/drivers/md
diff options
context:
space:
mode:
authorNeilBrown <neilb@suse.de>2014-12-14 20:56:57 -0500
committerNeilBrown <neilb@suse.de>2015-02-03 16:35:52 -0500
commit3be260cc18f850873cd32381158e28b0a9a391fd (patch)
tree2400b6a0774d047cf3cc90c8471f8d91ec89908c /drivers/md
parent64590f45ddc7147fa1968147a1f5b5c436b728fe (diff)
md/linear: remove rcu protections in favour of suspend/resume
The use of 'rcu' to protect accesses to ->private_data so that the ->private_data could be updated predates the introduction of mddev_suspend/mddev_resume. These are a cleaner mechanism for providing stability while swapping in a new ->private data - it is used by level_store() to support changing of raid levels. So get rid of the RCU stuff and just use mddev_suspend, mddev_resume. As these function call ->quiesce(), we add an empty function for linear just like for raid0. Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md')
-rw-r--r--drivers/md/linear.c44
1 file changed, 14 insertions, 30 deletions
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 4c2a92ce2b0b..b3e717adbc9b 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -34,7 +34,7 @@ static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
34 34
35 lo = 0; 35 lo = 0;
36 hi = mddev->raid_disks - 1; 36 hi = mddev->raid_disks - 1;
37 conf = rcu_dereference(mddev->private); 37 conf = mddev->private;
38 38
39 /* 39 /*
40 * Binary Search 40 * Binary Search
@@ -70,7 +70,6 @@ static int linear_mergeable_bvec(struct mddev *mddev,
70 int maxbytes = biovec->bv_len; 70 int maxbytes = biovec->bv_len;
71 struct request_queue *subq; 71 struct request_queue *subq;
72 72
73 rcu_read_lock();
74 dev0 = which_dev(mddev, sector); 73 dev0 = which_dev(mddev, sector);
75 maxsectors = dev0->end_sector - sector; 74 maxsectors = dev0->end_sector - sector;
76 subq = bdev_get_queue(dev0->rdev->bdev); 75 subq = bdev_get_queue(dev0->rdev->bdev);
@@ -80,7 +79,6 @@ static int linear_mergeable_bvec(struct mddev *mddev,
80 maxbytes = min(maxbytes, subq->merge_bvec_fn(subq, bvm, 79 maxbytes = min(maxbytes, subq->merge_bvec_fn(subq, bvm,
81 biovec)); 80 biovec));
82 } 81 }
83 rcu_read_unlock();
84 82
85 if (maxsectors < bio_sectors) 83 if (maxsectors < bio_sectors)
86 maxsectors = 0; 84 maxsectors = 0;
@@ -101,15 +99,13 @@ static int linear_congested(struct mddev *mddev, int bits)
101 struct linear_conf *conf; 99 struct linear_conf *conf;
102 int i, ret = 0; 100 int i, ret = 0;
103 101
104 rcu_read_lock(); 102 conf = mddev->private;
105 conf = rcu_dereference(mddev->private);
106 103
107 for (i = 0; i < mddev->raid_disks && !ret ; i++) { 104 for (i = 0; i < mddev->raid_disks && !ret ; i++) {
108 struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev); 105 struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
109 ret |= bdi_congested(&q->backing_dev_info, bits); 106 ret |= bdi_congested(&q->backing_dev_info, bits);
110 } 107 }
111 108
112 rcu_read_unlock();
113 return ret; 109 return ret;
114} 110}
115 111
@@ -118,12 +114,10 @@ static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disk
118 struct linear_conf *conf; 114 struct linear_conf *conf;
119 sector_t array_sectors; 115 sector_t array_sectors;
120 116
121 rcu_read_lock(); 117 conf = mddev->private;
122 conf = rcu_dereference(mddev->private);
123 WARN_ONCE(sectors || raid_disks, 118 WARN_ONCE(sectors || raid_disks,
124 "%s does not support generic reshape\n", __func__); 119 "%s does not support generic reshape\n", __func__);
125 array_sectors = conf->array_sectors; 120 array_sectors = conf->array_sectors;
126 rcu_read_unlock();
127 121
128 return array_sectors; 122 return array_sectors;
129} 123}
@@ -243,33 +237,22 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
243 if (!newconf) 237 if (!newconf)
244 return -ENOMEM; 238 return -ENOMEM;
245 239
246 oldconf = rcu_dereference_protected(mddev->private, 240 mddev_suspend(mddev);
247 lockdep_is_held( 241 oldconf = mddev->private;
248 &mddev->reconfig_mutex));
249 mddev->raid_disks++; 242 mddev->raid_disks++;
250 rcu_assign_pointer(mddev->private, newconf); 243 mddev->private = newconf;
251 md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); 244 md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
252 set_capacity(mddev->gendisk, mddev->array_sectors); 245 set_capacity(mddev->gendisk, mddev->array_sectors);
246 mddev_resume(mddev);
253 revalidate_disk(mddev->gendisk); 247 revalidate_disk(mddev->gendisk);
254 kfree_rcu(oldconf, rcu); 248 kfree(oldconf);
255 return 0; 249 return 0;
256} 250}
257 251
258static int linear_stop (struct mddev *mddev) 252static int linear_stop (struct mddev *mddev)
259{ 253{
260 struct linear_conf *conf = 254 struct linear_conf *conf = mddev->private;
261 rcu_dereference_protected(mddev->private,
262 lockdep_is_held(
263 &mddev->reconfig_mutex));
264 255
265 /*
266 * We do not require rcu protection here since
267 * we hold reconfig_mutex for both linear_add and
268 * linear_stop, so they cannot race.
269 * We should make sure any old 'conf's are properly
270 * freed though.
271 */
272 rcu_barrier();
273 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 256 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
274 kfree(conf); 257 kfree(conf);
275 mddev->private = NULL; 258 mddev->private = NULL;
@@ -290,16 +273,12 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
290 } 273 }
291 274
292 do { 275 do {
293 rcu_read_lock();
294
295 tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector); 276 tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector);
296 start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors; 277 start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
297 end_sector = tmp_dev->end_sector; 278 end_sector = tmp_dev->end_sector;
298 data_offset = tmp_dev->rdev->data_offset; 279 data_offset = tmp_dev->rdev->data_offset;
299 bio->bi_bdev = tmp_dev->rdev->bdev; 280 bio->bi_bdev = tmp_dev->rdev->bdev;
300 281
301 rcu_read_unlock();
302
303 if (unlikely(bio->bi_iter.bi_sector >= end_sector || 282 if (unlikely(bio->bi_iter.bi_sector >= end_sector ||
304 bio->bi_iter.bi_sector < start_sector)) 283 bio->bi_iter.bi_sector < start_sector))
305 goto out_of_bounds; 284 goto out_of_bounds;
@@ -346,6 +325,10 @@ static void linear_status (struct seq_file *seq, struct mddev *mddev)
346 seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2); 325 seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
347} 326}
348 327
328static void linear_quiesce(struct mddev *mddev, int state)
329{
330}
331
349static struct md_personality linear_personality = 332static struct md_personality linear_personality =
350{ 333{
351 .name = "linear", 334 .name = "linear",
@@ -357,6 +340,7 @@ static struct md_personality linear_personality =
357 .status = linear_status, 340 .status = linear_status,
358 .hot_add_disk = linear_add, 341 .hot_add_disk = linear_add,
359 .size = linear_size, 342 .size = linear_size,
343 .quiesce = linear_quiesce,
360 .congested = linear_congested, 344 .congested = linear_congested,
361 .mergeable_bvec = linear_mergeable_bvec, 345 .mergeable_bvec = linear_mergeable_bvec,
362}; 346};