author		Martin K. Petersen <martin.petersen@oracle.com>	2009-05-22 17:17:49 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-05-22 17:22:54 -0400
commit		e1defc4ff0cf57aca6c5e3ff99fa503f5943c1f1 (patch)
tree		d60d15a082171c58ac811d547d51a9c3119f23e3 /drivers/md/dm-table.c
parent		9bd7de51ee8537094656149eaf45338cadb7d7d4 (diff)
block: Do away with the notion of hardsect_size
Until now we have had a 1:1 mapping between the storage device's physical block size and the logical block size used when addressing the device. With SATA 4KB drives coming out, that will no longer be the case. The physical sector size will be 4KB but the logical block size will remain 512 bytes. Hence we need to distinguish between the physical block size and its logical counterpart. This patch renames hardsect_size to logical_block_size.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
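For illustration only (not part of this patch), the fragment below sketches what the rename means at the driver level: the addressable unit is reported with blk_queue_logical_block_size(), the setter this series uses in place of the old blk_queue_hardsect_size(), and it is read back through the queue_logical_block_size() accessor that also appears in the dm-table hunks below. The function and value here are assumptions for the example; 512 mirrors the 4KB-sector SATA drives described above, which still address in 512-byte logical blocks.

#include <linux/blkdev.h>

/* Illustrative driver fragment: report a 512-byte logical block size on a
 * request queue, as a drive with 4KB physical sectors but 512-byte
 * addressing would after this rename. */
static void example_set_logical_block_size(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 512);
}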
Diffstat (limited to 'drivers/md/dm-table.c')
-rw-r--r--	drivers/md/dm-table.c | 12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 429b50b975d5..65e2d9759857 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -108,7 +108,8 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
 	lhs->max_hw_segments =
 		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);
 
-	lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);
+	lhs->logical_block_size = max(lhs->logical_block_size,
+				      rhs->logical_block_size);
 
 	lhs->max_segment_size =
 		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
@@ -529,7 +530,8 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 	rs->max_hw_segments =
 		min_not_zero(rs->max_hw_segments, q->max_hw_segments);
 
-	rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);
+	rs->logical_block_size = max(rs->logical_block_size,
+				     queue_logical_block_size(q));
 
 	rs->max_segment_size =
 		min_not_zero(rs->max_segment_size, q->max_segment_size);
@@ -683,8 +685,8 @@ static void check_for_valid_limits(struct io_restrictions *rs)
 		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
 	if (!rs->max_hw_segments)
 		rs->max_hw_segments = MAX_HW_SEGMENTS;
-	if (!rs->hardsect_size)
-		rs->hardsect_size = 1 << SECTOR_SHIFT;
+	if (!rs->logical_block_size)
+		rs->logical_block_size = 1 << SECTOR_SHIFT;
 	if (!rs->max_segment_size)
 		rs->max_segment_size = MAX_SEGMENT_SIZE;
 	if (!rs->seg_boundary_mask)
@@ -914,7 +916,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	blk_queue_max_sectors(q, t->limits.max_sectors);
 	q->max_phys_segments = t->limits.max_phys_segments;
 	q->max_hw_segments = t->limits.max_hw_segments;
-	q->hardsect_size = t->limits.hardsect_size;
+	q->logical_block_size = t->limits.logical_block_size;
 	q->max_segment_size = t->limits.max_segment_size;
 	q->max_hw_sectors = t->limits.max_hw_sectors;
 	q->seg_boundary_mask = t->limits.seg_boundary_mask;
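Taken together, these hunks make the table compute the largest logical_block_size across the underlying devices and export it on the mapped device's queue. As a standalone illustration (the device path and program are placeholders, not part of the patch), that exported value can be observed from userspace with the long-standing BLKSSZGET ioctl:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	/* Hypothetical device-mapper node used for the example. */
	int fd = open("/dev/mapper/example", O_RDONLY);
	int lbs = 0;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BLKSSZGET, &lbs) < 0) {	/* logical block size in bytes */
		perror("BLKSSZGET");
		close(fd);
		return 1;
	}
	printf("logical block size: %d bytes\n", lbs);
	close(fd);
	return 0;
}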