author    Alasdair G Kergon <agk@redhat.com>	2008-07-03 03:53:43 -0400
committer Jens Axboe <jens.axboe@oracle.com>	2008-07-03 07:21:15 -0400
commit    cc371e66e340f35eed8dc4651c7c18e754c7fb26 (patch)
tree      5a2d6727eb07a05999c531a90da43ab1c36b713e /drivers
parent    b24498d477a14680fc3bb3ad884fa9fa76a2d237 (diff)
Add bvec_merge_data to handle stacked devices and ->merge_bvec()
When devices are stacked, one device's merge_bvec_fn may need to perform
the mapping and then call one or more functions for its underlying devices.

The following bio fields are used:
  bio->bi_sector
  bio->bi_bdev
  bio->bi_size
  bio->bi_rw  using bio_data_dir()

This patch creates a new struct bvec_merge_data holding a copy of those
fields to avoid having to change them directly in the struct bio when
going down the stack only to have to change them back again on the way
back up.

(And then when the bio gets mapped for real, the whole exercise gets
repeated, but that's a problem for another day...)

Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: Neil Brown <neilb@suse.de>
Cc: Milan Broz <mbroz@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
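For reference, the companion change to the block headers falls outside this
drivers-only view. Going by the commit description and by how the converted
drivers below use their bmd/bvm argument, the new helper type and the updated
merge_bvec_fn signature look roughly like this (a sketch of the header change,
not the verbatim hunk):

struct bvec_merge_data {
	struct block_device	*bi_bdev;
	sector_t		bi_sector;
	unsigned int		bi_size;
	unsigned long		bi_rw;
};

typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);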
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/pktcdvd.c |  9
-rw-r--r--  drivers/md/linear.c     | 10
-rw-r--r--  drivers/md/raid0.c      | 10
-rw-r--r--  drivers/md/raid10.c     | 15
-rw-r--r--  drivers/md/raid5.c      | 10
5 files changed, 31 insertions, 23 deletions
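To illustrate the stacking scenario the commit message describes (one device's
merge_bvec_fn remapping the request and then consulting an underlying device),
here is a minimal sketch of what a stacked driver can now do with a throwaway
copy instead of editing the bio in place. struct my_stacked_dev and its fields
are made up for the example; only bvec_merge_data, bdev_get_queue() and the
queue's merge_bvec_fn hook come from the kernel:

/* Illustration only: a stacked driver with a simple linear offset onto
 * one lower device.  struct my_stacked_dev is a hypothetical example type. */
struct my_stacked_dev {
	struct block_device	*lower_bdev;
	sector_t		start;		/* offset into the lower device */
};

static int stacked_merge_bvec(struct request_queue *q,
			      struct bvec_merge_data *bvm,
			      struct bio_vec *biovec)
{
	struct my_stacked_dev *dev = q->queuedata;
	struct request_queue *lower_q = bdev_get_queue(dev->lower_bdev);
	struct bvec_merge_data remapped = *bvm;	/* local copy; the bio stays untouched */

	remapped.bi_bdev = dev->lower_bdev;
	remapped.bi_sector = bvm->bi_sector + dev->start;

	if (!lower_q->merge_bvec_fn)
		return biovec->bv_len;	/* lower device imposes no limit */

	return lower_q->merge_bvec_fn(lower_q, &remapped, biovec);
}

This is the change-and-restore dance on struct bio that the bmd/bvm copies in
the hunks below are meant to avoid.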
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 3ba1df93e9e3..589850cff359 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2633,11 +2633,12 @@ end_io:
 
 
 
-static int pkt_merge_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *bvec)
+static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
+			  struct bio_vec *bvec)
 {
 	struct pktcdvd_device *pd = q->queuedata;
-	sector_t zone = ZONE(bio->bi_sector, pd);
-	int used = ((bio->bi_sector - zone) << 9) + bio->bi_size;
+	sector_t zone = ZONE(bmd->bi_sector, pd);
+	int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
 	int remaining = (pd->settings.size << 9) - used;
 	int remaining2;
 
@@ -2645,7 +2646,7 @@ static int pkt_merge_bvec(struct request_queue *q, struct bio *bio, struct bio_v
 	 * A bio <= PAGE_SIZE must be allowed. If it crosses a packet
 	 * boundary, pkt_make_request() will split the bio.
 	 */
-	remaining2 = PAGE_SIZE - bio->bi_size;
+	remaining2 = PAGE_SIZE - bmd->bi_size;
 	remaining = max(remaining, remaining2);
 
 	BUG_ON(remaining < 0);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 10748240cb2f..6a866d7c8ae5 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -50,17 +50,19 @@ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
 /**
  *	linear_mergeable_bvec -- tell bio layer if two requests can be merged
  *	@q: request queue
- *	@bio: the buffer head that's been built up so far
+ *	@bvm: properties of new bio
  *	@biovec: the request that could be merged to it.
  *
  *	Return amount of bytes we can take at this offset
  */
-static int linear_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
+static int linear_mergeable_bvec(struct request_queue *q,
+				 struct bvec_merge_data *bvm,
+				 struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
 	dev_info_t *dev0;
-	unsigned long maxsectors, bio_sectors = bio->bi_size >> 9;
-	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+	unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
+	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
 
 	dev0 = which_dev(mddev, sector);
 	maxsectors = (dev0->size << 1) - (sector - (dev0->offset<<1));
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 914c04ddec7c..bcbb82594a19 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -241,18 +241,20 @@ static int create_strip_zones (mddev_t *mddev)
 /**
  *	raid0_mergeable_bvec -- tell bio layer if a two requests can be merged
  *	@q: request queue
- *	@bio: the buffer head that's been built up so far
+ *	@bvm: properties of new bio
  *	@biovec: the request that could be merged to it.
  *
  *	Return amount of bytes we can accept at this offset
  */
-static int raid0_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
+static int raid0_mergeable_bvec(struct request_queue *q,
+				struct bvec_merge_data *bvm,
+				struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
-	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
 	int max;
 	unsigned int chunk_sectors = mddev->chunk_size >> 9;
-	unsigned int bio_sectors = bio->bi_size >> 9;
+	unsigned int bio_sectors = bvm->bi_size >> 9;
 
 	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
 	if (max < 0) max = 0; /* bio_add cannot handle a negative return */
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a71277b640ab..22bb2b1b886d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -439,26 +439,27 @@ static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
 /**
  *	raid10_mergeable_bvec -- tell bio layer if a two requests can be merged
  *	@q: request queue
- *	@bio: the buffer head that's been built up so far
+ *	@bvm: properties of new bio
  *	@biovec: the request that could be merged to it.
  *
  *	Return amount of bytes we can accept at this offset
  *	If near_copies == raid_disk, there are no striping issues,
  *	but in that case, the function isn't called at all.
  */
-static int raid10_mergeable_bvec(struct request_queue *q, struct bio *bio,
-				 struct bio_vec *bio_vec)
+static int raid10_mergeable_bvec(struct request_queue *q,
+				 struct bvec_merge_data *bvm,
+				 struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
-	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
 	int max;
 	unsigned int chunk_sectors = mddev->chunk_size >> 9;
-	unsigned int bio_sectors = bio->bi_size >> 9;
+	unsigned int bio_sectors = bvm->bi_size >> 9;
 
 	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
 	if (max < 0) max = 0; /* bio_add cannot handle a negative return */
-	if (max <= bio_vec->bv_len && bio_sectors == 0)
-		return bio_vec->bv_len;
+	if (max <= biovec->bv_len && bio_sectors == 0)
+		return biovec->bv_len;
 	else
 		return max;
 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 54c8ee28fcc4..9b00675dc64f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3319,15 +3319,17 @@ static int raid5_congested(void *data, int bits)
 /* We want read requests to align with chunks where possible,
  * but write requests don't need to.
  */
-static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
+static int raid5_mergeable_bvec(struct request_queue *q,
+				struct bvec_merge_data *bvm,
+				struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
-	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
 	int max;
 	unsigned int chunk_sectors = mddev->chunk_size >> 9;
-	unsigned int bio_sectors = bio->bi_size >> 9;
+	unsigned int bio_sectors = bvm->bi_size >> 9;
 
-	if (bio_data_dir(bio) == WRITE)
+	if ((bvm->bi_rw & 1) == WRITE)
 		return biovec->bv_len; /* always allow writes to be mergeable */
 
 	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;