author     raz ben yehuda <raziebe@gmail.com>  2009-06-16 03:02:05 -0400
committer  NeilBrown <neilb@suse.de>           2009-06-16 03:02:05 -0400
commit     fbb704efb784e2c8418e34dc3013af76bdd58101 (patch)
tree       814820430d00128cb5f3fc0d44bb579c19d5d2a9 /drivers/md/raid0.c
parent     2ac06c3332898103210b478c5a17c20e28929287 (diff)
md: raid0: Enable chunk sizes other than powers of 2.
Maintain two flows: one for pow2 chunk sizes (which uses masks and shift), and a flow for the general case (which uses sector_div). This is for the sake of performance.

 - introduce map_sector and is_io_in_chunk_boundary to encapsulate those two flows better for raid0_make_request

 - fix blk_mergeable to support the two flows.

Signed-off-by: raziebe@gmail.com
Signed-off-by: NeilBrown <neilb@suse.de>
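As background (not part of the patch itself): a minimal userspace sketch of the two flows the message describes. The names sector_div_sim() and offset_in_chunk() are invented for illustration; sector_div_sim() only mimics the kernel's sector_div(), which divides its first argument in place and returns the remainder, and chunk_sects is the chunk size in 512-byte sectors.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the kernel's sector_div(): divides *s in place and
     * returns the remainder.  Illustrative only. */
    static uint32_t sector_div_sim(uint64_t *s, uint32_t div)
    {
            uint32_t rem = (uint32_t)(*s % div);

            *s /= div;
            return rem;
    }

    /* Offset of a sector inside its chunk, using the two flows the patch
     * keeps separate: mask when chunk_sects is a power of 2, a real
     * division otherwise. */
    static uint32_t offset_in_chunk(uint64_t sector, uint32_t chunk_sects)
    {
            if ((chunk_sects & (chunk_sects - 1)) == 0)     /* power of 2 */
                    return (uint32_t)(sector & (chunk_sects - 1));
            return sector_div_sim(&sector, chunk_sects);
    }

    int main(void)
    {
            /* 64KiB chunk = 128 sectors (pow2), 96KiB chunk = 192 sectors */
            printf("%u\n", offset_in_chunk(1000, 128));     /* 1000 & 127 = 104 */
            printf("%u\n", offset_in_chunk(1000, 192));     /* 1000 % 192 = 40  */
            return 0;
    }

Compiling and running this prints 104 and 40: the in-chunk offsets of sector 1000 for a 128-sector and a 192-sector chunk respectively.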
Diffstat (limited to 'drivers/md/raid0.c')
-rw-r--r--  drivers/md/raid0.c  107
1 file changed, 77 insertions, 30 deletions
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 39936a217f95..7cd2671cc794 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -273,7 +273,12 @@ static int raid0_mergeable_bvec(struct request_queue *q,
         unsigned int chunk_sectors = mddev->chunk_size >> 9;
         unsigned int bio_sectors = bvm->bi_size >> 9;
 
-        max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
+        if (is_power_of_2(mddev->chunk_size))
+                max = (chunk_sectors - ((sector & (chunk_sectors-1))
+                                + bio_sectors)) << 9;
+        else
+                max = (chunk_sectors - (sector_div(sector, chunk_sectors)
+                                + bio_sectors)) << 9;
         if (max < 0) max = 0; /* bio_add cannot handle a negative return */
         if (max <= biovec->bv_len && bio_sectors == 0)
                 return biovec->bv_len;
@@ -299,9 +304,8 @@ static int raid0_run(mddev_t *mddev)
 {
         int ret;
 
-        if (mddev->chunk_size == 0 ||
-            !is_power_of_2(mddev->chunk_size)) {
-                printk(KERN_ERR "md/raid0: chunk size must be a power of 2.\n");
+        if (mddev->chunk_size == 0) {
+                printk(KERN_ERR "md/raid0: chunk size must be set.\n");
                 return -EINVAL;
         }
         blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
@@ -367,15 +371,65 @@ static struct strip_zone *find_zone(struct raid0_private_data *conf,
         BUG();
 }
 
-static int raid0_make_request (struct request_queue *q, struct bio *bio)
+/*
+ * Remaps the bio to the target device. We separate two flows,
+ * a power-of-2 flow and a general flow, for the sake of performance.
+ */
+static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
+                              sector_t sector, sector_t *sector_offset)
 {
-        mddev_t *mddev = q->queuedata;
-        unsigned int sect_in_chunk, chunksect_bits, chunk_sects;
+        unsigned int sect_in_chunk;
+        sector_t chunk;
         raid0_conf_t *conf = mddev->private;
+        unsigned int chunk_sects = mddev->chunk_size >> 9;
+
+        if (is_power_of_2(mddev->chunk_size)) {
+                int chunksect_bits = ffz(~chunk_sects);
+                /* find the sector offset inside the chunk */
+                sect_in_chunk = sector & (chunk_sects - 1);
+                sector >>= chunksect_bits;
+                /* chunk in zone */
+                chunk = *sector_offset;
+                /* quotient is the chunk in the real device */
+                sector_div(chunk, zone->nb_dev << chunksect_bits);
+        } else {
+                sect_in_chunk = sector_div(sector, chunk_sects);
+                chunk = *sector_offset;
+                sector_div(chunk, chunk_sects * zone->nb_dev);
+        }
+        /*
+         * position the bio over the real device
+         * real sector = chunk in device + start of zone
+         *      + the position in the chunk
+         */
+        *sector_offset = (chunk * chunk_sects) + sect_in_chunk;
+        return conf->devlist[(zone - conf->strip_zone)*mddev->raid_disks
+                             + sector_div(sector, zone->nb_dev)];
+}
+
+/*
+ * Is the IO distributed over 1 or more chunks?
+ */
+static inline int is_io_in_chunk_boundary(mddev_t *mddev,
+                        unsigned int chunk_sects, struct bio *bio)
+{
+        if (likely(is_power_of_2(mddev->chunk_size))) {
+                return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
+                                        + (bio->bi_size >> 9));
+        } else {
+                sector_t sector = bio->bi_sector;
+                return chunk_sects >= (sector_div(sector, chunk_sects)
+                                        + (bio->bi_size >> 9));
+        }
+}
+
+static int raid0_make_request(struct request_queue *q, struct bio *bio)
+{
+        mddev_t *mddev = q->queuedata;
+        unsigned int chunk_sects;
+        sector_t sector_offset;
         struct strip_zone *zone;
         mdk_rdev_t *tmp_dev;
-        sector_t chunk;
-        sector_t sector, rsect, sector_offset;
         const int rw = bio_data_dir(bio);
         int cpu;
 
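To make the arithmetic of map_sector() above concrete, here is a hedged userspace sketch of the general (non power-of-2) flow. map_sector_sim() is an invented name, the kernel types are dropped, and the numbers assume a single zone starting at device sector 0 (dev_start = 0, data_offset = 0), 3 member disks, and a 96KiB chunk (192 sectors, not a power of 2).

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified model of the general (sector_div) flow in map_sector():
     * one zone starting at device sector 0, nb_dev striped members. */
    static void map_sector_sim(uint64_t sector, uint32_t chunk_sects,
                               uint32_t nb_dev, uint32_t *dev,
                               uint64_t *dev_sector)
    {
            uint32_t sect_in_chunk = sector % chunk_sects;  /* offset inside chunk */
            uint64_t chunk_no = sector / chunk_sects;       /* absolute chunk number */
            uint64_t chunk_in_dev = sector / ((uint64_t)chunk_sects * nb_dev);

            *dev = (uint32_t)(chunk_no % nb_dev);           /* round-robin striping */
            *dev_sector = chunk_in_dev * chunk_sects + sect_in_chunk;
    }

    int main(void)
    {
            uint32_t dev;
            uint64_t dev_sector;

            /* sector 1000, 192-sector chunks, 3 disks:
             * chunk 5 -> disk 2, per-disk chunk 1, offset 40 -> sector 232 */
            map_sector_sim(1000, 192, 3, &dev, &dev_sector);
            printf("disk %u, sector %llu\n", dev,
                   (unsigned long long)dev_sector);
            return 0;
    }

Running it prints "disk 2, sector 232": sector 1000 falls in chunk 5, which round-robins to the third disk as that disk's second chunk, 40 sectors in.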
@@ -391,10 +445,8 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
         part_stat_unlock();
 
         chunk_sects = mddev->chunk_size >> 9;
-        chunksect_bits = ffz(~chunk_sects);
-        sector = bio->bi_sector;
-
-        if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
+        if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
+                sector_t sector = bio->bi_sector;
                 struct bio_pair *bp;
                 /* Sanity check -- queue functions should prevent this happening */
                 if (bio->bi_vcnt != 1 ||
@@ -403,7 +455,12 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
                 /* This is a one page bio that upper layers
                  * refuse to split for us, so we need to split it.
                  */
-                bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
+                if (likely(is_power_of_2(mddev->chunk_size)))
+                        bp = bio_split(bio, chunk_sects - (sector &
+                                                           (chunk_sects-1)));
+                else
+                        bp = bio_split(bio, chunk_sects -
+                                       sector_div(sector, chunk_sects));
                 if (raid0_make_request(q, &bp->bio1))
                         generic_make_request(&bp->bio1);
                 if (raid0_make_request(q, &bp->bio2))
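A brief worked example of the split above (illustrative, not from the patch): both branches compute how many sectors remain until the next chunk boundary, so bio1 ends exactly on that boundary. With a 192-sector chunk and bi_sector = 1000, sector_div() leaves a remainder of 40, the split size is 192 - 40 = 152 sectors, and bio2 starts at sector 1152 = 6 * 192, the start of the next chunk.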
@@ -412,24 +469,14 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
                 bio_pair_release(bp);
                 return 0;
         }
-        sector_offset = sector;
-        zone = find_zone(conf, &sector_offset);
-        sect_in_chunk = bio->bi_sector & (chunk_sects - 1);
-        {
-                sector_t x = sector_offset >> chunksect_bits;
-
-                sector_div(x, zone->nb_dev);
-                chunk = x;
 
-                x = sector >> chunksect_bits;
-                tmp_dev = conf->devlist[(zone - conf->strip_zone)*mddev->raid_disks
-                                        + sector_div(x, zone->nb_dev)];
-        }
-        rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk;
-
+        sector_offset = bio->bi_sector;
+        zone = find_zone(mddev->private, &sector_offset);
+        tmp_dev = map_sector(mddev, zone, bio->bi_sector,
+                             &sector_offset);
         bio->bi_bdev = tmp_dev->bdev;
-        bio->bi_sector = rsect + tmp_dev->data_offset;
-
+        bio->bi_sector = sector_offset + zone->dev_start +
+                tmp_dev->data_offset;
         /*
          * Let the main block layer submit the IO and resolve recursion:
          */