author     Martin K. Petersen <martin.petersen@oracle.com>   2009-06-30 21:13:45 -0400
committer  NeilBrown <neilb@suse.de>                         2009-06-30 21:13:45 -0400
commit     8f6c2e4b325a8e9f8f47febb2fd0ed4fae7d45a9
tree       6e383e2ec48b5c90fe07325a7f6ab38ea1a97dfa /drivers/md/raid10.c
parent     5a4f13fad1ab5bd08dea78fc55321e429d83cddf
md: Use new topology calls to indicate alignment and I/O sizes
Switch MD over to the new disk_stack_limits() function which checks for
alignment and adjusts preferred I/O sizes when stacking.
Also indicate preferred I/O sizes where applicable.
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: NeilBrown <neilb@suse.de>
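
Below is a minimal userspace sketch, not kernel code, of the preferred-I/O-size arithmetic this patch adds to raid10's run(): io_min is set to the chunk size, and io_opt to a full stripe's worth of data, falling back to chunk_size * raid_disks when the disk count is not an exact multiple of near_copies. The values used are illustrative only.

    #include <stdio.h>

    int main(void)
    {
            /* Illustrative values only; in the patch these come from
             * mddev->chunk_sectors, conf->raid_disks and conf->near_copies. */
            unsigned int chunk_sectors = 128;       /* 64 KiB chunk */
            unsigned int raid_disks = 4;
            unsigned int near_copies = 2;

            unsigned int chunk_size = chunk_sectors << 9;   /* bytes, as in the patch */
            unsigned int io_min = chunk_size;
            unsigned int io_opt;

            if (raid_disks % near_copies)
                    io_opt = chunk_size * raid_disks;
            else
                    io_opt = chunk_size * (raid_disks / near_copies);

            printf("io_min = %u bytes, io_opt = %u bytes\n", io_min, io_opt);
            return 0;
    }

With a 64 KiB chunk, 4 disks and 2 near copies this prints io_min = 65536 bytes and io_opt = 131072 bytes, i.e. one chunk per effective data stripe member.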
Diffstat (limited to 'drivers/md/raid10.c')
-rw-r--r--   drivers/md/raid10.c | 19 +++++++++++++------
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ae12ceafe10c..7298a5e5a183 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1151,8 +1151,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 	for ( ; mirror <= last ; mirror++)
 		if ( !(p=conf->mirrors+mirror)->rdev) {
 
-			blk_queue_stack_limits(mddev->queue,
-					       rdev->bdev->bd_disk->queue);
+			disk_stack_limits(mddev->gendisk, rdev->bdev,
+					  rdev->data_offset << 9);
 			/* as we don't honour merge_bvec_fn, we must never risk
 			 * violating it, so limit ->max_sector to one PAGE, as
 			 * a one page request is never in violation.
@@ -2044,7 +2044,7 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 static int run(mddev_t *mddev)
 {
 	conf_t *conf;
-	int i, disk_idx;
+	int i, disk_idx, chunk_size;
 	mirror_info_t *disk;
 	mdk_rdev_t *rdev;
 	int nc, fc, fo;
@@ -2130,6 +2130,14 @@ static int run(mddev_t *mddev)
 	spin_lock_init(&conf->device_lock);
 	mddev->queue->queue_lock = &conf->device_lock;
 
+	chunk_size = mddev->chunk_sectors << 9;
+	blk_queue_io_min(mddev->queue, chunk_size);
+	if (conf->raid_disks % conf->near_copies)
+		blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
+	else
+		blk_queue_io_opt(mddev->queue, chunk_size *
+				 (conf->raid_disks / conf->near_copies));
+
 	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		disk_idx = rdev->raid_disk;
 		if (disk_idx >= mddev->raid_disks
@@ -2138,9 +2146,8 @@ static int run(mddev_t *mddev)
 		disk = conf->mirrors + disk_idx;
 
 		disk->rdev = rdev;
-
-		blk_queue_stack_limits(mddev->queue,
-				       rdev->bdev->bd_disk->queue);
+		disk_stack_limits(mddev->gendisk, rdev->bdev,
+				  rdev->data_offset << 9);
 		/* as we don't honour merge_bvec_fn, we must never risk
 		 * violating it, so limit ->max_sector to one PAGE, as
 		 * a one page request is never in violation.
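
The other half of the change is that disk_stack_limits() is handed the byte offset of the md data area on each member (rdev->data_offset << 9), so alignment can be judged against where the array's data actually starts on the member rather than the start of the device. A loose userspace illustration of that idea follows; it is not the block layer's actual bookkeeping, and the helper name and example offsets are made up.

    #include <stdio.h>

    /* Hypothetical helper: how many bytes a data area starting at 'start'
     * is off from the member's io_min granularity; zero means the md data
     * area begins on a natural boundary of the underlying device. */
    static unsigned long long misalignment(unsigned long long start,
                                           unsigned long long io_min)
    {
            return start % io_min;
    }

    int main(void)
    {
            unsigned long long io_min = 4096;       /* e.g. a 4K-sector member */

            /* rdev->data_offset is in 512-byte sectors, hence the << 9,
             * mirroring the call sites in the patch. */
            printf("data_offset 2048 sectors: %llu bytes off\n",
                   misalignment(2048ULL << 9, io_min));
            printf("data_offset   63 sectors: %llu bytes off\n",
                   misalignment(63ULL << 9, io_min));
            return 0;
    }

A 1 MiB data offset lands on a 4096-byte boundary (0 bytes off), while the old 63-sector offset does not (3584 bytes off); the latter case is what the alignment checks in the new stacking call are meant to surface.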