diff options
author | Jonathan Brassow <jbrassow@redhat.com> | 2012-07-30 20:03:53 -0400 |
---|---|---|
committer | NeilBrown <neilb@suse.de> | 2012-07-30 20:03:53 -0400 |
commit | cc4d1efdd017083bbcbaf23feb4cdc717fa7dab8 (patch) | |
tree | a6a7775a4715ab009c00ec4ef7655eafc307b694 | |
parent | 473e87ce485ffcac041f7911b33f0b4cd4d6cf2b (diff) |
MD RAID10: Export md_raid10_congested
md/raid10: Export is_congested test.
In similar fashion to commits
11d8a6e3719519fbc0e2c9d61b6fa931b84bf813
1ed7242e591af7e233234d483f12d33818b189d9
we export the RAID10 congestion checking function so that dm-raid.c can
make use of it and of the personality. The 'queue' and 'gendisk'
structures will not be available to the MD code when device-mapper sets
up the device, so we also conditionalize access to these fields.
Signed-off-by: Jonathan Brassow <jbrassow@redhat.com>
Signed-off-by: NeilBrown <neilb@suse.de>
-rw-r--r-- | drivers/md/raid10.c | 56 | ||||
-rw-r--r-- | drivers/md/raid10.h | 3 |
2 files changed, 37 insertions, 22 deletions
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index e77acf024055..e2549deab7c3 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -853,9 +853,8 @@ retry: | |||
853 | return rdev; | 853 | return rdev; |
854 | } | 854 | } |
855 | 855 | ||
856 | static int raid10_congested(void *data, int bits) | 856 | int md_raid10_congested(struct mddev *mddev, int bits) |
857 | { | 857 | { |
858 | struct mddev *mddev = data; | ||
859 | struct r10conf *conf = mddev->private; | 858 | struct r10conf *conf = mddev->private; |
860 | int i, ret = 0; | 859 | int i, ret = 0; |
861 | 860 | ||
@@ -863,8 +862,6 @@ static int raid10_congested(void *data, int bits) | |||
863 | conf->pending_count >= max_queued_requests) | 862 | conf->pending_count >= max_queued_requests) |
864 | return 1; | 863 | return 1; |
865 | 864 | ||
866 | if (mddev_congested(mddev, bits)) | ||
867 | return 1; | ||
868 | rcu_read_lock(); | 865 | rcu_read_lock(); |
869 | for (i = 0; | 866 | for (i = 0; |
870 | (i < conf->geo.raid_disks || i < conf->prev.raid_disks) | 867 | (i < conf->geo.raid_disks || i < conf->prev.raid_disks) |
@@ -880,6 +877,15 @@ static int raid10_congested(void *data, int bits) | |||
880 | rcu_read_unlock(); | 877 | rcu_read_unlock(); |
881 | return ret; | 878 | return ret; |
882 | } | 879 | } |
880 | EXPORT_SYMBOL_GPL(md_raid10_congested); | ||
881 | |||
882 | static int raid10_congested(void *data, int bits) | ||
883 | { | ||
884 | struct mddev *mddev = data; | ||
885 | |||
886 | return mddev_congested(mddev, bits) || | ||
887 | md_raid10_congested(mddev, bits); | ||
888 | } | ||
883 | 889 | ||
884 | static void flush_pending_writes(struct r10conf *conf) | 890 | static void flush_pending_writes(struct r10conf *conf) |
885 | { | 891 | { |
@@ -3486,12 +3492,14 @@ static int run(struct mddev *mddev) | |||
3486 | conf->thread = NULL; | 3492 | conf->thread = NULL; |
3487 | 3493 | ||
3488 | chunk_size = mddev->chunk_sectors << 9; | 3494 | chunk_size = mddev->chunk_sectors << 9; |
3489 | blk_queue_io_min(mddev->queue, chunk_size); | 3495 | if (mddev->queue) { |
3490 | if (conf->geo.raid_disks % conf->geo.near_copies) | 3496 | blk_queue_io_min(mddev->queue, chunk_size); |
3491 | blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); | 3497 | if (conf->geo.raid_disks % conf->geo.near_copies) |
3492 | else | 3498 | blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); |
3493 | blk_queue_io_opt(mddev->queue, chunk_size * | 3499 | else |
3494 | (conf->geo.raid_disks / conf->geo.near_copies)); | 3500 | blk_queue_io_opt(mddev->queue, chunk_size * |
3501 | (conf->geo.raid_disks / conf->geo.near_copies)); | ||
3502 | } | ||
3495 | 3503 | ||
3496 | rdev_for_each(rdev, mddev) { | 3504 | rdev_for_each(rdev, mddev) { |
3497 | long long diff; | 3505 | long long diff; |
@@ -3525,8 +3533,9 @@ static int run(struct mddev *mddev) | |||
3525 | if (first || diff < min_offset_diff) | 3533 | if (first || diff < min_offset_diff) |
3526 | min_offset_diff = diff; | 3534 | min_offset_diff = diff; |
3527 | 3535 | ||
3528 | disk_stack_limits(mddev->gendisk, rdev->bdev, | 3536 | if (mddev->gendisk) |
3529 | rdev->data_offset << 9); | 3537 | disk_stack_limits(mddev->gendisk, rdev->bdev, |
3538 | rdev->data_offset << 9); | ||
3530 | 3539 | ||
3531 | disk->head_position = 0; | 3540 | disk->head_position = 0; |
3532 | } | 3541 | } |
@@ -3589,22 +3598,22 @@ static int run(struct mddev *mddev) | |||
3589 | md_set_array_sectors(mddev, size); | 3598 | md_set_array_sectors(mddev, size); |
3590 | mddev->resync_max_sectors = size; | 3599 | mddev->resync_max_sectors = size; |
3591 | 3600 | ||
3592 | mddev->queue->backing_dev_info.congested_fn = raid10_congested; | 3601 | if (mddev->queue) { |
3593 | mddev->queue->backing_dev_info.congested_data = mddev; | ||
3594 | |||
3595 | /* Calculate max read-ahead size. | ||
3596 | * We need to readahead at least twice a whole stripe.... | ||
3597 | * maybe... | ||
3598 | */ | ||
3599 | { | ||
3600 | int stripe = conf->geo.raid_disks * | 3602 | int stripe = conf->geo.raid_disks * |
3601 | ((mddev->chunk_sectors << 9) / PAGE_SIZE); | 3603 | ((mddev->chunk_sectors << 9) / PAGE_SIZE); |
3604 | mddev->queue->backing_dev_info.congested_fn = raid10_congested; | ||
3605 | mddev->queue->backing_dev_info.congested_data = mddev; | ||
3606 | |||
3607 | /* Calculate max read-ahead size. | ||
3608 | * We need to readahead at least twice a whole stripe.... | ||
3609 | * maybe... | ||
3610 | */ | ||
3602 | stripe /= conf->geo.near_copies; | 3611 | stripe /= conf->geo.near_copies; |
3603 | if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) | 3612 | if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) |
3604 | mddev->queue->backing_dev_info.ra_pages = 2 * stripe; | 3613 | mddev->queue->backing_dev_info.ra_pages = 2 * stripe; |
3614 | blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); | ||
3605 | } | 3615 | } |
3606 | 3616 | ||
3607 | blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); | ||
3608 | 3617 | ||
3609 | if (md_integrity_register(mddev)) | 3618 | if (md_integrity_register(mddev)) |
3610 | goto out_free_conf; | 3619 | goto out_free_conf; |
@@ -3655,7 +3664,10 @@ static int stop(struct mddev *mddev) | |||
3655 | lower_barrier(conf); | 3664 | lower_barrier(conf); |
3656 | 3665 | ||
3657 | md_unregister_thread(&mddev->thread); | 3666 | md_unregister_thread(&mddev->thread); |
3658 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ | 3667 | if (mddev->queue) |
3668 | /* the unplug fn references 'conf'*/ | ||
3669 | blk_sync_queue(mddev->queue); | ||
3670 | |||
3659 | if (conf->r10bio_pool) | 3671 | if (conf->r10bio_pool) |
3660 | mempool_destroy(conf->r10bio_pool); | 3672 | mempool_destroy(conf->r10bio_pool); |
3661 | kfree(conf->mirrors); | 3673 | kfree(conf->mirrors); |
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index b0a435869dca..007c2c68dd83 100644 --- a/drivers/md/raid10.h +++ b/drivers/md/raid10.h | |||
@@ -145,4 +145,7 @@ enum r10bio_state { | |||
145 | */ | 145 | */ |
146 | R10BIO_Previous, | 146 | R10BIO_Previous, |
147 | }; | 147 | }; |
148 | |||
149 | extern int md_raid10_congested(struct mddev *mddev, int bits); | ||
150 | |||
148 | #endif | 151 | #endif |