author		Andre Noll <maan@systemlinux.org>	2009-06-17 18:45:01 -0400
committer	NeilBrown <neilb@suse.de>		2009-06-17 18:45:01 -0400
commit		9d8f0363623b3da12c43007cf77f5e1a4e8a5964 (patch)
tree		0fee53971a397ade209dd36c4f1ed50db6450faf /drivers/md/raid0.c
parent		fbb704efb784e2c8418e34dc3013af76bdd58101 (diff)
md: Make mddev->chunk_size sector-based.
This patch renames the chunk_size field to chunk_sectors with the
implied change of semantics. Since

	is_power_of_2(chunk_size) = is_power_of_2(chunk_sectors << 9)
	                          = is_power_of_2(chunk_sectors)

these bits don't need an adjustment for the shift.
Signed-off-by: Andre Noll <maan@systemlinux.org>
Signed-off-by: NeilBrown <neilb@suse.de>
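As a quick illustration of the identity in the log message, here is a minimal standalone C sketch (not part of the patch; is_power_of_2() below imitates the kernel helper from include/linux/log2.h, and the 64 KiB chunk is an assumed example value):

#include <assert.h>

/* Imitates the kernel's is_power_of_2(): true iff n is a nonzero power of 2. */
static int is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned long chunk_size = 64 * 1024;          /* bytes (assumed example) */
	unsigned long chunk_sectors = chunk_size >> 9; /* 512-byte sectors: 128 */

	/* Shifting by 9 multiplies by 2^9, so power-of-2-ness is preserved. */
	assert(is_power_of_2(chunk_size) == is_power_of_2(chunk_sectors));
	assert(is_power_of_2(chunk_sectors << 9) == is_power_of_2(chunk_sectors));
	return 0;
}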
Diffstat (limited to 'drivers/md/raid0.c')
-rw-r--r--	drivers/md/raid0.c	27
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 7cd2671cc794..f20b18ff7969 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -238,10 +238,10 @@ static int create_strip_zones(mddev_t *mddev)
 	 * now since we have the hard sector sizes, we can make sure
 	 * chunk size is a multiple of that sector size
 	 */
-	if (mddev->chunk_size % queue_logical_block_size(mddev->queue)) {
+	if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
 		printk(KERN_ERR "%s chunk_size of %d not valid\n",
 		       mdname(mddev),
-		       mddev->chunk_size);
+		       mddev->chunk_sectors << 9);
 		goto abort;
 	}
 	printk(KERN_INFO "raid0: done.\n");
@@ -270,10 +270,10 @@ static int raid0_mergeable_bvec(struct request_queue *q,
 	mddev_t *mddev = q->queuedata;
 	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
 	int max;
-	unsigned int chunk_sectors = mddev->chunk_size >> 9;
+	unsigned int chunk_sectors = mddev->chunk_sectors;
 	unsigned int bio_sectors = bvm->bi_size >> 9;
 
-	if (is_power_of_2(mddev->chunk_size))
+	if (is_power_of_2(mddev->chunk_sectors))
 		max = (chunk_sectors - ((sector & (chunk_sectors-1))
 					+ bio_sectors)) << 9;
 	else
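The arithmetic in this hunk is unchanged; only the source of chunk_sectors moves from a byte shift to the new field. In the power-of-2 branch, sector & (chunk_sectors - 1) is the bio's offset inside its chunk, so max is the number of bytes still free in that chunk. A standalone worked sketch with assumed values (chunk and bio sizes are illustrative, sector_t is a stand-in typedef):

#include <stdio.h>

typedef unsigned long long sector_t;	/* stand-in for the kernel type */

int main(void)
{
	unsigned int chunk_sectors = 8;	/* 4 KiB chunk (assumed) */
	sector_t sector = 21;		/* bio start; 21 & 7 = 5 into the chunk */
	unsigned int bio_sectors = 2;	/* sectors already in the bio */

	/* Bytes still free in this chunk: (8 - (5 + 2)) << 9 = 512 */
	int max = (chunk_sectors - ((sector & (chunk_sectors - 1))
				    + bio_sectors)) << 9;
	printf("max = %d\n", max);
	return 0;
}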
@@ -304,11 +304,11 @@ static int raid0_run(mddev_t *mddev)
 {
 	int ret;
 
-	if (mddev->chunk_size == 0) {
+	if (mddev->chunk_sectors == 0) {
 		printk(KERN_ERR "md/raid0: chunk size must be set.\n");
 		return -EINVAL;
 	}
-	blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
+	blk_queue_max_sectors(mddev->queue, mddev->chunk_sectors);
 	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
 
 	ret = create_strip_zones(mddev);
@@ -330,7 +330,8 @@ static int raid0_run(mddev_t *mddev)
 	 * chunksize should be used in that case.
 	 */
 	{
-		int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
+		int stripe = mddev->raid_disks *
+			(mddev->chunk_sectors << 9) / PAGE_SIZE;
 		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
 			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
 	}
@@ -381,9 +382,9 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
 	unsigned int sect_in_chunk;
 	sector_t chunk;
 	raid0_conf_t *conf = mddev->private;
-	unsigned int chunk_sects = mddev->chunk_size >> 9;
+	unsigned int chunk_sects = mddev->chunk_sectors;
 
-	if (is_power_of_2(mddev->chunk_size)) {
+	if (is_power_of_2(mddev->chunk_sectors)) {
 		int chunksect_bits = ffz(~chunk_sects);
 		/* find the sector offset inside the chunk */
 		sect_in_chunk = sector & (chunk_sects - 1);
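ffz() returns the index of the lowest zero bit, so ffz(~chunk_sects) is the index of the lowest set bit of chunk_sects; when chunk_sects is a power of two, that is exactly log2(chunk_sects), which is why this line needs no change beyond the rename. A sketch with a portable stand-in for ffz() (the loop below is illustrative, not the kernel implementation):

#include <assert.h>

/* Portable stand-in for the kernel's ffz(): index of the lowest 0 bit. */
static int ffz_substitute(unsigned long word)
{
	int bit = 0;
	while (word & 1) {
		word >>= 1;
		bit++;
	}
	return bit;
}

int main(void)
{
	unsigned long chunk_sects = 128;	/* 64 KiB chunk (assumed) */

	/* Lowest zero bit of ~x == lowest set bit of x == log2 for powers of 2. */
	assert(ffz_substitute(~chunk_sects) == 7);	/* 2^7 == 128 */
	return 0;
}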
@@ -413,7 +414,7 @@ static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
 static inline int is_io_in_chunk_boundary(mddev_t *mddev,
 			unsigned int chunk_sects, struct bio *bio)
 {
-	if (likely(is_power_of_2(mddev->chunk_size))) {
+	if (likely(is_power_of_2(mddev->chunk_sectors))) {
 		return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
 					+ (bio->bi_size >> 9));
 	} else{
@@ -444,7 +445,7 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
 		      bio_sectors(bio));
 	part_stat_unlock();
 
-	chunk_sects = mddev->chunk_size >> 9;
+	chunk_sects = mddev->chunk_sectors;
 	if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
 		sector_t sector = bio->bi_sector;
 		struct bio_pair *bp;
@@ -455,7 +456,7 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
 		/* This is a one page bio that upper layers
 		 * refuse to split for us, so we need to split it.
 		 */
-		if (likely(is_power_of_2(mddev->chunk_size)))
+		if (likely(is_power_of_2(mddev->chunk_sectors)))
			bp = bio_split(bio, chunk_sects - (sector &
							   (chunk_sects-1)));
 		else
@@ -519,7 +520,7 @@ static void raid0_status(struct seq_file *seq, mddev_t *mddev)
 		zone_start = conf->strip_zone[j].zone_end;
 	}
 #endif
-	seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
+	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
 	return;
 }
 
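The final hunk keeps the printed value identical: with 512-byte sectors, chunk_sectors / 2 is the chunk size in KiB, the same number the old chunk_size / 1024 produced. For example, a 64 KiB chunk has chunk_size = 65536 and chunk_sectors = 128, and 65536 / 1024 = 128 / 2 = 64.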