aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
author    Bryn Reeves <breeves@redhat.com>        2006-10-03 04:15:42 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-10-03 11:04:16 -0400
commit    3cb4021453a69585e458ec2177677c0c1300dccf (patch)
tree      cc2394e0eb52d6bed2fbc523641499714d6159c4 /drivers
parent    9faf400f7e51e56ec76b2fc481c3191c01cb3a57 (diff)
[PATCH] dm: extract device limit setting
Separate the setting of device I/O limits from dm_get_device(). dm-loop will use this. Signed-off-by: Bryn Reeves <breeves@redhat.com> Signed-off-by: Alasdair G Kergon <agk@redhat.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/md/dm-table.c87
1 files changed, 46 insertions, 41 deletions
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 47412ae98fb9..4920998efeeb 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -522,56 +522,61 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 	return 0;
 }
 
-int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
-		  sector_t len, int mode, struct dm_dev **result)
+void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 {
-	int r = __table_get_device(ti->table, ti, path,
-				   start, len, mode, result);
-	if (!r) {
-		request_queue_t *q = bdev_get_queue((*result)->bdev);
-		struct io_restrictions *rs = &ti->limits;
-
-		/*
-		 * Combine the device limits low.
-		 *
-		 * FIXME: if we move an io_restriction struct
-		 *        into q this would just be a call to
-		 *        combine_restrictions_low()
-		 */
+	request_queue_t *q = bdev_get_queue(bdev);
+	struct io_restrictions *rs = &ti->limits;
+
+	/*
+	 * Combine the device limits low.
+	 *
+	 * FIXME: if we move an io_restriction struct
+	 *        into q this would just be a call to
+	 *        combine_restrictions_low()
+	 */
+	rs->max_sectors =
+		min_not_zero(rs->max_sectors, q->max_sectors);
+
+	/* FIXME: Device-Mapper on top of RAID-0 breaks because DM
+	 *        currently doesn't honor MD's merge_bvec_fn routine.
+	 *        In this case, we'll force DM to use PAGE_SIZE or
+	 *        smaller I/O, just to be safe. A better fix is in the
+	 *        works, but add this for the time being so it will at
+	 *        least operate correctly.
+	 */
+	if (q->merge_bvec_fn)
 		rs->max_sectors =
-			min_not_zero(rs->max_sectors, q->max_sectors);
+			min_not_zero(rs->max_sectors,
+				     (unsigned int) (PAGE_SIZE >> 9));
 
-		/* FIXME: Device-Mapper on top of RAID-0 breaks because DM
-		 *        currently doesn't honor MD's merge_bvec_fn routine.
-		 *        In this case, we'll force DM to use PAGE_SIZE or
-		 *        smaller I/O, just to be safe. A better fix is in the
-		 *        works, but add this for the time being so it will at
-		 *        least operate correctly.
-		 */
-		if (q->merge_bvec_fn)
-			rs->max_sectors =
-				min_not_zero(rs->max_sectors,
-					     (unsigned int) (PAGE_SIZE >> 9));
+	rs->max_phys_segments =
+		min_not_zero(rs->max_phys_segments,
+			     q->max_phys_segments);
 
-		rs->max_phys_segments =
-			min_not_zero(rs->max_phys_segments,
-				     q->max_phys_segments);
+	rs->max_hw_segments =
+		min_not_zero(rs->max_hw_segments, q->max_hw_segments);
 
-		rs->max_hw_segments =
-			min_not_zero(rs->max_hw_segments, q->max_hw_segments);
+	rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);
 
-		rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);
+	rs->max_segment_size =
+		min_not_zero(rs->max_segment_size, q->max_segment_size);
 
-		rs->max_segment_size =
-			min_not_zero(rs->max_segment_size, q->max_segment_size);
+	rs->seg_boundary_mask =
+		min_not_zero(rs->seg_boundary_mask,
+			     q->seg_boundary_mask);
 
-		rs->seg_boundary_mask =
-			min_not_zero(rs->seg_boundary_mask,
-				     q->seg_boundary_mask);
+	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+}
+EXPORT_SYMBOL_GPL(dm_set_device_limits);
 
-		rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
-	}
+int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
+		  sector_t len, int mode, struct dm_dev **result)
+{
+	int r = __table_get_device(ti->table, ti, path,
+				   start, len, mode, result);
+
+	if (!r)
+		dm_set_device_limits(ti, (*result)->bdev);
 
 	return r;
 }