author		Martin K. Petersen <martin.petersen@oracle.com>	2009-05-22 17:17:49 -0400
committer	Jens Axboe <jens.axboe@oracle.com>			2009-05-22 17:22:54 -0400
commit		e1defc4ff0cf57aca6c5e3ff99fa503f5943c1f1 (patch)
tree		d60d15a082171c58ac811d547d51a9c3119f23e3 /drivers/md
parent		9bd7de51ee8537094656149eaf45338cadb7d7d4 (diff)
block: Do away with the notion of hardsect_size
Until now we have had a 1:1 mapping between storage device physical block size and the logical block size used when addressing the device. With SATA 4KB drives coming out that will no longer be the case. The sector size will be 4KB but the logical block size will remain 512 bytes. Hence we need to distinguish between the physical block size and its logical counterpart.

This patch renames hardsect_size to logical_block_size.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
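As a side illustration (not part of this patch), the logical/physical split described above is visible from userspace on kernels that report both sizes. The hypothetical probe below assumes the BLKSSZGET and BLKPBSZGET ioctls are available (the latter appeared in later kernels as part of this 4KB work); on a 512-emulation SATA drive it would print a logical size of 512 and a physical size of 4096.

/* sketch: print logical vs. physical block size of a block device */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	int fd;
	int logical = 0;            /* addressing granularity (BLKSSZGET) */
	unsigned int physical = 0;  /* media sector size (BLKPBSZGET)     */

	if (argc != 2) {
		fprintf(stderr, "usage: %s <block-device>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	ioctl(fd, BLKSSZGET, &logical);
	ioctl(fd, BLKPBSZGET, &physical);

	printf("logical: %d bytes, physical: %u bytes\n", logical, physical);
	close(fd);
	return 0;
}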
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/bitmap.c		 4
-rw-r--r--	drivers/md/dm-exception-store.c	 2
-rw-r--r--	drivers/md/dm-log.c		 3
-rw-r--r--	drivers/md/dm-snap-persistent.c	 2
-rw-r--r--	drivers/md/dm-table.c		12
-rw-r--r--	drivers/md/md.c			 2
6 files changed, 14 insertions(+), 11 deletions(-)
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 47c68bc75a17..06b0ded1ce23 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -232,7 +232,7 @@ static struct page *read_sb_page(mddev_t *mddev, long offset,
 		target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
 
 		if (sync_page_io(rdev->bdev, target,
-				 roundup(size, bdev_hardsect_size(rdev->bdev)),
+				 roundup(size, bdev_logical_block_size(rdev->bdev)),
 				 page, READ)) {
 			page->index = index;
 			attach_page_buffers(page, NULL); /* so that free_buffer will
@@ -287,7 +287,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
 		int size = PAGE_SIZE;
 		if (page->index == bitmap->file_pages-1)
 			size = roundup(bitmap->last_page_size,
-				       bdev_hardsect_size(rdev->bdev));
+				       bdev_logical_block_size(rdev->bdev));
 		/* Just make sure we aren't corrupting data or
 		 * metadata
 		 */
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index a2e26c242141..75d8081a9041 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -178,7 +178,7 @@ static int set_chunk_size(struct dm_exception_store *store,
 	}
 
 	/* Validate the chunk size against the device block size */
-	if (chunk_size_ulong % (bdev_hardsect_size(store->cow->bdev) >> 9)) {
+	if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
 		*error = "Chunk size is not a multiple of device blocksize";
 		return -EINVAL;
 	}
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index be233bc4d917..6fa8ccf91c70 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -413,7 +413,8 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
 	 * Buffer holds both header and bitset.
 	 */
 	buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
-			       bitset_size, ti->limits.hardsect_size);
+			       bitset_size,
+			       ti->limits.logical_block_size);
 
 	if (buf_size > dev->bdev->bd_inode->i_size) {
 		DMWARN("log device %s too small: need %llu bytes",
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index e75c6dd76a9a..2662a41337e7 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -282,7 +282,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 	 */
 	if (!ps->store->chunk_size) {
 		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
-			bdev_hardsect_size(ps->store->cow->bdev) >> 9);
+			bdev_logical_block_size(ps->store->cow->bdev) >> 9);
 		ps->store->chunk_mask = ps->store->chunk_size - 1;
 		ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
 		chunk_size_supplied = 0;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 429b50b975d5..65e2d9759857 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -108,7 +108,8 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
 	lhs->max_hw_segments =
 		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);
 
-	lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);
+	lhs->logical_block_size = max(lhs->logical_block_size,
+				      rhs->logical_block_size);
 
 	lhs->max_segment_size =
 		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
@@ -529,7 +530,8 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 	rs->max_hw_segments =
 		min_not_zero(rs->max_hw_segments, q->max_hw_segments);
 
-	rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);
+	rs->logical_block_size = max(rs->logical_block_size,
+				     queue_logical_block_size(q));
 
 	rs->max_segment_size =
 		min_not_zero(rs->max_segment_size, q->max_segment_size);
@@ -683,8 +685,8 @@ static void check_for_valid_limits(struct io_restrictions *rs)
 		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
 	if (!rs->max_hw_segments)
 		rs->max_hw_segments = MAX_HW_SEGMENTS;
-	if (!rs->hardsect_size)
-		rs->hardsect_size = 1 << SECTOR_SHIFT;
+	if (!rs->logical_block_size)
+		rs->logical_block_size = 1 << SECTOR_SHIFT;
 	if (!rs->max_segment_size)
 		rs->max_segment_size = MAX_SEGMENT_SIZE;
 	if (!rs->seg_boundary_mask)
@@ -914,7 +916,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	blk_queue_max_sectors(q, t->limits.max_sectors);
 	q->max_phys_segments = t->limits.max_phys_segments;
 	q->max_hw_segments = t->limits.max_hw_segments;
-	q->hardsect_size = t->limits.hardsect_size;
+	q->logical_block_size = t->limits.logical_block_size;
 	q->max_segment_size = t->limits.max_segment_size;
 	q->max_hw_sectors = t->limits.max_hw_sectors;
 	q->seg_boundary_mask = t->limits.seg_boundary_mask;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index fccc8343a250..4cbc19f5c304 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1202,7 +1202,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
 	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
 
 	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
-	bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
+	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
 	if (rdev->sb_size & bmask)
 		rdev->sb_size = (rdev->sb_size | bmask) + 1;
 