about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
author    Andre Noll <maan@systemlinux.org>  2008-10-12 20:55:12 -0400
committer NeilBrown <neilb@suse.de>          2008-10-12 20:55:12 -0400
commit 6283815d1853b7daf31dc4adb83e5c1dc9568251 (patch)
tree   cd4f00ce17e7c274091ca9015b164d6f06611975 /drivers
parent 451708d2a439accbce136637ed4f156fc27371ab (diff)
md: linear: Represent dev_info->size and dev_info->offset in sectors.
Rename them to num_sectors and start_sector, which is more descriptive.

Signed-off-by: Andre Noll <maan@systemlinux.org>
Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/md/linear.c | 54
1 file changed, 29 insertions(+), 25 deletions(-)
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 01ed03a0c7ee..1dadb134e0bb 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -42,7 +42,7 @@ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
 	(void)sector_div(block, conf->hash_spacing);
 	hash = conf->hash_table[block];
 
-	while ((sector>>1) >= (hash->size + hash->offset))
+	while (sector >= hash->num_sectors + hash->start_sector)
 		hash++;
 	return hash;
 }
@@ -65,7 +65,7 @@ static int linear_mergeable_bvec(struct request_queue *q,
 	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
 
 	dev0 = which_dev(mddev, sector);
-	maxsectors = (dev0->size << 1) - (sector - (dev0->offset<<1));
+	maxsectors = dev0->num_sectors - (sector - dev0->start_sector);
 
 	if (maxsectors < bio_sectors)
 		maxsectors = 0;
@@ -113,7 +113,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 	mdk_rdev_t *rdev;
 	int i, nb_zone, cnt;
 	sector_t min_spacing;
-	sector_t curr_offset;
+	sector_t curr_sector;
 	struct list_head *tmp;
 
 	conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t),
@@ -145,7 +145,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
 			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
-		disk->size = rdev->size;
+		disk->num_sectors = rdev->size * 2;
 		conf->array_sectors += rdev->size * 2;
 
 		cnt++;
@@ -169,7 +169,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 		sector_t sz = 0;
 		int j;
 		for (j = i; j < cnt - 1 && sz < min_spacing; j++)
-			sz += conf->disks[j].size;
+			sz += conf->disks[j].num_sectors / 2;
 		if (sz >= min_spacing && sz < conf->hash_spacing)
 			conf->hash_spacing = sz;
 	}
@@ -211,20 +211,20 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 	 * Here we generate the linear hash table
 	 * First calculate the device offsets.
 	 */
-	conf->disks[0].offset = 0;
+	conf->disks[0].start_sector = 0;
 	for (i = 1; i < raid_disks; i++)
-		conf->disks[i].offset =
-			conf->disks[i-1].offset +
-			conf->disks[i-1].size;
+		conf->disks[i].start_sector =
+			conf->disks[i-1].start_sector +
+			conf->disks[i-1].num_sectors;
 
 	table = conf->hash_table;
 	i = 0;
-	for (curr_offset = 0;
-	     curr_offset < conf->array_sectors / 2;
-	     curr_offset += conf->hash_spacing) {
+	for (curr_sector = 0;
+	     curr_sector < conf->array_sectors;
+	     curr_sector += conf->hash_spacing * 2) {
 
 		while (i < raid_disks-1 &&
-		       curr_offset >= conf->disks[i+1].offset)
+		       curr_sector >= conf->disks[i+1].start_sector)
 			i++;
 
 		*table ++ = conf->disks + i;
@@ -316,7 +316,6 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
 	const int rw = bio_data_dir(bio);
 	mddev_t *mddev = q->queuedata;
 	dev_info_t *tmp_dev;
-	sector_t block;
 	int cpu;
 
 	if (unlikely(bio_barrier(bio))) {
@@ -331,29 +330,33 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
 	part_stat_unlock();
 
 	tmp_dev = which_dev(mddev, bio->bi_sector);
-	block = bio->bi_sector >> 1;
 
-	if (unlikely(block >= (tmp_dev->size + tmp_dev->offset)
-		     || block < tmp_dev->offset)) {
+	if (unlikely(bio->bi_sector >= (tmp_dev->num_sectors +
+					tmp_dev->start_sector)
+		     || (bio->bi_sector <
+			 tmp_dev->start_sector))) {
 		char b[BDEVNAME_SIZE];
 
-		printk("linear_make_request: Block %llu out of bounds on "
-			"dev %s size %llu offset %llu\n",
-			(unsigned long long)block,
+		printk("linear_make_request: Sector %llu out of bounds on "
+			"dev %s: %llu sectors, offset %llu\n",
+			(unsigned long long)bio->bi_sector,
 			bdevname(tmp_dev->rdev->bdev, b),
-			(unsigned long long)tmp_dev->size,
-			(unsigned long long)tmp_dev->offset);
+			(unsigned long long)tmp_dev->num_sectors,
+			(unsigned long long)tmp_dev->start_sector);
 		bio_io_error(bio);
 		return 0;
 	}
 	if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
-		     (tmp_dev->offset + tmp_dev->size)<<1)) {
+		     tmp_dev->start_sector + tmp_dev->num_sectors)) {
 		/* This bio crosses a device boundary, so we have to
 		 * split it.
 		 */
 		struct bio_pair *bp;
+
 		bp = bio_split(bio,
-			((tmp_dev->offset + tmp_dev->size)<<1) - bio->bi_sector);
+			       tmp_dev->start_sector + tmp_dev->num_sectors
+			       - bio->bi_sector);
+
 		if (linear_make_request(q, &bp->bio1))
 			generic_make_request(&bp->bio1);
 		if (linear_make_request(q, &bp->bio2))
@@ -363,7 +366,8 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
 	}
 
 	bio->bi_bdev = tmp_dev->rdev->bdev;
-	bio->bi_sector = bio->bi_sector - (tmp_dev->offset << 1) + tmp_dev->rdev->data_offset;
+	bio->bi_sector = bio->bi_sector - tmp_dev->start_sector
+			 + tmp_dev->rdev->data_offset;
 
 	return 1;
 }