author     Heinz Mauelshagen <heinzm@redhat.com>    2015-02-13 13:48:01 -0500
committer  NeilBrown <neilb@suse.de>                2015-04-21 18:00:41 -0400
commit     753f2856cda2a130d38ebc3db97bff66c1ef3ca7 (patch)
tree       1b390d200431d9f6291a428eb24b99eb796ea99d /drivers/md/raid0.c
parent     ac8fa4196d205ac8fff3f8932bddbad4f16e4110 (diff)
md raid0: access mddev->queue (request queue member) conditionally because it is not set when accessed from dm-raid
The patch makes 3 references to mddev->queue in the raid0 personality
conditional in order to allow for it to be accessed from dm-raid.

Mandatory, because md instances underneath dm-raid don't manage a request
queue of their own, which'd lead to oopses without the patch.

Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
Tested-by: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: NeilBrown <neilb@suse.de>
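To make the failure mode concrete, here is a minimal userspace sketch (not kernel code) of the guard pattern the patch applies: every access to the optional queue pointer is wrapped in a NULL check, so an md instance stacked underneath dm-raid, which has no request queue of its own, no longer dereferences a NULL pointer. The names fake_queue, fake_mddev and setup_limits are hypothetical stand-ins, not kernel APIs.

    /* Illustrative model only; struct and function names are invented. */
    #include <stdio.h>
    #include <stddef.h>

    struct fake_queue {               /* stand-in for struct request_queue */
            unsigned int io_min;
            unsigned int io_opt;
    };

    struct fake_mddev {               /* stand-in for struct mddev */
            struct fake_queue *queue; /* NULL when driven from dm-raid */
            unsigned int chunk_sectors;
            int raid_disks;
    };

    static void setup_limits(struct fake_mddev *mddev)
    {
            /* Guard every queue access, as the patch does in raid0. */
            if (mddev->queue) {
                    mddev->queue->io_min = mddev->chunk_sectors << 9;
                    mddev->queue->io_opt =
                            (mddev->chunk_sectors << 9) * mddev->raid_disks;
            }
    }

    int main(void)
    {
            struct fake_queue q = { 0, 0 };
            struct fake_mddev md_native = { &q, 128, 4 };   /* normal md array */
            struct fake_mddev md_dmraid = { NULL, 128, 4 }; /* md under dm-raid */

            setup_limits(&md_native);  /* limits applied to the queue */
            setup_limits(&md_dmraid);  /* no-op instead of a NULL dereference */

            printf("io_min=%u io_opt=%u\n", q.io_min, q.io_opt);
            return 0;
    }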
Diffstat (limited to 'drivers/md/raid0.c')
-rw-r--r--  drivers/md/raid0.c  48
1 file changed, 27 insertions(+), 21 deletions(-)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 3b5d7f704aa3..2cb59a641cd2 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -271,14 +271,16 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 		goto abort;
 	}
 
-	blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
-	blk_queue_io_opt(mddev->queue,
-			 (mddev->chunk_sectors << 9) * mddev->raid_disks);
-
-	if (!discard_supported)
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
-	else
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+	if (mddev->queue) {
+		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+		blk_queue_io_opt(mddev->queue,
+				 (mddev->chunk_sectors << 9) * mddev->raid_disks);
+
+		if (!discard_supported)
+			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		else
+			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+	}
 
 	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
 	*private_conf = conf;
@@ -429,9 +431,12 @@ static int raid0_run(struct mddev *mddev)
 	}
 	if (md_check_no_bitmap(mddev))
 		return -EINVAL;
-	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
-	blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
-	blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
+
+	if (mddev->queue) {
+		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
+		blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
+	}
 
 	/* if private is not null, we are here after takeover */
 	if (mddev->private == NULL) {
@@ -448,16 +453,17 @@ static int raid0_run(struct mddev *mddev)
 	printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
 	       mdname(mddev),
 	       (unsigned long long)mddev->array_sectors);
-	/* calculate the max read-ahead size.
-	 * For read-ahead of large files to be effective, we need to
-	 * readahead at least twice a whole stripe. i.e. number of devices
-	 * multiplied by chunk size times 2.
-	 * If an individual device has an ra_pages greater than the
-	 * chunk size, then we will not drive that device as hard as it
-	 * wants. We consider this a configuration error: a larger
-	 * chunksize should be used in that case.
-	 */
-	{
+
+	if (mddev->queue) {
+		/* calculate the max read-ahead size.
+		 * For read-ahead of large files to be effective, we need to
+		 * readahead at least twice a whole stripe. i.e. number of devices
+		 * multiplied by chunk size times 2.
+		 * If an individual device has an ra_pages greater than the
+		 * chunk size, then we will not drive that device as hard as it
+		 * wants. We consider this a configuration error: a larger
+		 * chunksize should be used in that case.
+		 */
 		int stripe = mddev->raid_disks *
 			(mddev->chunk_sectors << 9) / PAGE_SIZE;
 		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)