author | Martin K. Petersen <martin.petersen@oracle.com> | 2009-05-22 17:17:50 -0400
committer | Jens Axboe <jens.axboe@oracle.com> | 2009-05-22 17:22:54 -0400
commit | ae03bf639a5027d27270123f5f6e3ee6a412781d (patch)
tree | d705f41a188ad656b1f47f7952626a9f992e3b8f /drivers/md/dm-table.c
parent | e1defc4ff0cf57aca6c5e3ff99fa503f5943c1f1 (diff)
block: Use accessor functions for queue limits
Convert all external users of queue limits to using wrapper functions
instead of poking the request queue variables directly.
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
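
The conversion is mechanical: readers of a queue limit call a small accessor instead of dereferencing the struct request_queue field themselves. The stand-alone sketch below uses mock types to illustrate that reader-side style together with the min_not_zero() stacking done in dm_set_device_limits(); the real struct request_queue, queue_max_sectors() and min_not_zero() live in the kernel headers of this era and differ in detail.

/*
 * Sketch only: mock types illustrating the accessor pattern, not kernel code.
 */
#include <stdio.h>

struct request_queue {
	unsigned int max_sectors;		/* mock of the field being wrapped */
};

/* accessor: callers no longer poke q->max_sectors directly */
static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
	return q->max_sectors;
}

/* same idea as the kernel's min_not_zero(): smaller of the two non-zero values */
#define min_not_zero(a, b) \
	((a) == 0 ? (b) : ((b) == 0 ? (a) : ((a) < (b) ? (a) : (b))))

int main(void)
{
	struct request_queue q = { .max_sectors = 1024 };
	unsigned int stacked = 0;		/* 0 means "no limit recorded yet" */

	/* post-patch style, as in dm_set_device_limits() */
	stacked = min_not_zero(stacked, queue_max_sectors(&q));
	printf("stacked max_sectors = %u\n", stacked);	/* prints 1024 */
	return 0;
}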
Diffstat (limited to 'drivers/md/dm-table.c')
-rw-r--r-- | drivers/md/dm-table.c | 28
1 file changed, 14 insertions, 14 deletions
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 65e2d9759857..e9a73bb242b0 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -510,7 +510,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 	 * combine_restrictions_low()
 	 */
 	rs->max_sectors =
-		min_not_zero(rs->max_sectors, q->max_sectors);
+		min_not_zero(rs->max_sectors, queue_max_sectors(q));
 
 	/*
 	 * Check if merge fn is supported.
@@ -525,25 +525,25 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 
 	rs->max_phys_segments =
 		min_not_zero(rs->max_phys_segments,
-			     q->max_phys_segments);
+			     queue_max_phys_segments(q));
 
 	rs->max_hw_segments =
-		min_not_zero(rs->max_hw_segments, q->max_hw_segments);
+		min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));
 
 	rs->logical_block_size = max(rs->logical_block_size,
 				     queue_logical_block_size(q));
 
 	rs->max_segment_size =
-		min_not_zero(rs->max_segment_size, q->max_segment_size);
+		min_not_zero(rs->max_segment_size, queue_max_segment_size(q));
 
 	rs->max_hw_sectors =
-		min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
+		min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));
 
 	rs->seg_boundary_mask =
 		min_not_zero(rs->seg_boundary_mask,
-			     q->seg_boundary_mask);
+			     queue_segment_boundary(q));
 
-	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
+	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));
 
 	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 }
@@ -914,13 +914,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	 * restrictions.
 	 */
 	blk_queue_max_sectors(q, t->limits.max_sectors);
-	q->max_phys_segments = t->limits.max_phys_segments;
-	q->max_hw_segments = t->limits.max_hw_segments;
-	q->logical_block_size = t->limits.logical_block_size;
-	q->max_segment_size = t->limits.max_segment_size;
-	q->max_hw_sectors = t->limits.max_hw_sectors;
-	q->seg_boundary_mask = t->limits.seg_boundary_mask;
-	q->bounce_pfn = t->limits.bounce_pfn;
+	blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
+	blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
+	blk_queue_logical_block_size(q, t->limits.logical_block_size);
+	blk_queue_max_segment_size(q, t->limits.max_segment_size);
+	blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
+	blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
+	blk_queue_bounce_limit(q, t->limits.bounce_pfn);
 
 	if (t->limits.no_cluster)
 		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
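
The last hunk, in dm_table_set_restrictions(), goes the other way: instead of assigning request_queue fields, it feeds the table's limits through the blk_queue_*() setters, giving the block layer one place to validate or clamp a value before it lands in the queue. The sketch below again uses mock types; the clamping shown is only illustrative of what such a setter can do, not the code in block/blk-settings.c.

/*
 * Sketch only: mock setter illustrating why direct field assignment is
 * replaced by blk_queue_*() calls.  Not the kernel's implementation.
 */
#include <stdio.h>

struct request_queue {
	unsigned int max_segment_size;
};

/* mock blk_queue_* style setter: one central place to enforce a sane floor */
static void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < 4096)			/* illustrative minimum only */
		max_size = 4096;
	q->max_segment_size = max_size;
}

int main(void)
{
	struct request_queue q = { 0 };

	/* pre-patch style was "q.max_segment_size = 512;", bypassing any checks */
	blk_queue_max_segment_size(&q, 512);	/* post-patch style goes through the helper */
	printf("max_segment_size = %u\n", q.max_segment_size);	/* 4096 after clamping */
	return 0;
}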