path: root/drivers/md/raid10.c
author		Tejun Heo <tj@kernel.org>	2009-07-03 18:13:18 -0400
committer	Tejun Heo <tj@kernel.org>	2009-07-03 18:13:18 -0400
commit		c43768cbb7655ea5ff782ae250f6e2ef4297cf98 (patch)
tree		3982e41dde3eecaa3739a5d1a8ed18d04bd74f01 /drivers/md/raid10.c
parent		1a8dd307cc0a2119be4e578c517795464e6dabba (diff)
parent		746a99a5af60ee676afa2ba469ccd1373493c7e7 (diff)
Merge branch 'master' into for-next
Pull linus#master to merge the PER_CPU_DEF_ATTRIBUTES and alpha build fix
changes. As alpha in the percpu tree uses the 'weak' attribute instead of
inline assembly, there is no need for the __used attribute.

Conflicts:
	arch/alpha/include/asm/percpu.h
	arch/mn10300/kernel/vmlinux.lds.S
	include/linux/percpu-defs.h
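A side note on the __used remark: __used (__attribute__((used))) exists to
keep the compiler from discarding a static definition it sees no references
to, whereas a weak definition has external linkage and may be overridden at
link time, so the compiler must emit the symbol regardless. A minimal sketch
of that distinction, with hypothetical symbol names (this is not the
kernel's actual percpu code):

/* Hypothetical illustration; not the kernel's percpu implementation. */

/* A static object with no visible references may be discarded by the
 * compiler unless it is marked used; this is what __used guards against. */
static int hidden_counter __attribute__((used));

/* A weak definition has external linkage and may be overridden by a strong
 * definition in another translation unit, so the compiler must emit the
 * symbol anyway -- no 'used' attribute required. */
int __attribute__((weak)) percpu_dummy;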
Diffstat (limited to 'drivers/md/raid10.c')
-rw-r--r--	drivers/md/raid10.c	19
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ae12ceafe10c..7298a5e5a183 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1151,8 +1151,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 	for ( ; mirror <= last ; mirror++)
 		if ( !(p=conf->mirrors+mirror)->rdev) {
 
-			blk_queue_stack_limits(mddev->queue,
-					       rdev->bdev->bd_disk->queue);
+			disk_stack_limits(mddev->gendisk, rdev->bdev,
+					  rdev->data_offset << 9);
 			/* as we don't honour merge_bvec_fn, we must never risk
 			 * violating it, so limit ->max_sector to one PAGE, as
 			 * a one page request is never in violation.
@@ -2044,7 +2044,7 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 static int run(mddev_t *mddev)
 {
 	conf_t *conf;
-	int i, disk_idx;
+	int i, disk_idx, chunk_size;
 	mirror_info_t *disk;
 	mdk_rdev_t *rdev;
 	int nc, fc, fo;
@@ -2130,6 +2130,14 @@ static int run(mddev_t *mddev)
 	spin_lock_init(&conf->device_lock);
 	mddev->queue->queue_lock = &conf->device_lock;
 
+	chunk_size = mddev->chunk_sectors << 9;
+	blk_queue_io_min(mddev->queue, chunk_size);
+	if (conf->raid_disks % conf->near_copies)
+		blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
+	else
+		blk_queue_io_opt(mddev->queue, chunk_size *
+				 (conf->raid_disks / conf->near_copies));
+
 	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		disk_idx = rdev->raid_disk;
 		if (disk_idx >= mddev->raid_disks
@@ -2138,9 +2146,8 @@ static int run(mddev_t *mddev)
 		disk = conf->mirrors + disk_idx;
 
 		disk->rdev = rdev;
-
-		blk_queue_stack_limits(mddev->queue,
-				       rdev->bdev->bd_disk->queue);
+		disk_stack_limits(mddev->gendisk, rdev->bdev,
+				  rdev->data_offset << 9);
 		/* as we don't honour merge_bvec_fn, we must never risk
 		 * violating it, so limit ->max_sector to one PAGE, as
 		 * a one page request is never in violation.
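On the io_min/io_opt hunk above: the minimum I/O hint is set to one chunk,
and the optimal I/O hint to one full stripe. When raid_disks divides evenly
by near_copies, a stripe covers raid_disks / near_copies distinct chunks;
when it does not, the near copies wrap around the disks, so a full pass
touches all raid_disks chunks before the pattern repeats. A rough userspace
restatement of that arithmetic, with made-up geometry values (128-sector
chunks, 4 disks, 2 near copies):

#include <stdio.h>

/* Userspace restatement of the io_opt arithmetic from the hunk above.
 * The geometry values are made up for illustration only. */
int main(void)
{
	unsigned int chunk_sectors = 128;             /* 64 KiB chunks */
	unsigned int chunk_size = chunk_sectors << 9; /* sectors -> bytes */
	unsigned int raid_disks = 4, near_copies = 2;
	unsigned int io_opt;

	if (raid_disks % near_copies)
		/* uneven layout: copies wrap, a stripe pass uses every disk */
		io_opt = chunk_size * raid_disks;
	else
		/* even layout: a stripe spans raid_disks/near_copies chunks */
		io_opt = chunk_size * (raid_disks / near_copies);

	printf("io_min=%u bytes, io_opt=%u bytes\n", chunk_size, io_opt);
	return 0;
}

With these sample values the program prints io_min=65536 bytes and
io_opt=131072 bytes, i.e. one 64 KiB chunk and one two-chunk stripe.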