aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/blkdev.h
diff options
context:
space:
mode:
authorMartin K. Petersen <martin.petersen@oracle.com>2009-05-22 17:17:49 -0400
committerJens Axboe <jens.axboe@oracle.com>2009-05-22 17:22:54 -0400
commite1defc4ff0cf57aca6c5e3ff99fa503f5943c1f1 (patch)
treed60d15a082171c58ac811d547d51a9c3119f23e3 /include/linux/blkdev.h
parent9bd7de51ee8537094656149eaf45338cadb7d7d4 (diff)
block: Do away with the notion of hardsect_size
Until now we have had a 1:1 mapping between storage device physical
block size and the logical block sized used when addressing the device.
With SATA 4KB drives coming out that will no longer be the case.  The
sector size will be 4KB but the logical block size will remain
512-bytes.  Hence we need to distinguish between the physical block
size and the logical ditto.

This patch renames hardsect_size to logical_block_size.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--include/linux/blkdev.h14
1 files changed, 7 insertions, 7 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 56ce53fce72e..872b78b7a101 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -391,7 +391,7 @@ struct request_queue
 	unsigned int		max_hw_sectors;
 	unsigned short		max_phys_segments;
 	unsigned short		max_hw_segments;
-	unsigned short		hardsect_size;
+	unsigned short		logical_block_size;
 	unsigned int		max_segment_size;
 
 	unsigned long		seg_boundary_mask;
@@ -901,7 +901,7 @@ extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
-extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
+extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
 extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
@@ -988,19 +988,19 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
 
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
-static inline int queue_hardsect_size(struct request_queue *q)
+static inline unsigned short queue_logical_block_size(struct request_queue *q)
 {
 	int retval = 512;
 
-	if (q && q->hardsect_size)
-		retval = q->hardsect_size;
+	if (q && q->logical_block_size)
+		retval = q->logical_block_size;
 
 	return retval;
 }
 
-static inline int bdev_hardsect_size(struct block_device *bdev)
+static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
 {
-	return queue_hardsect_size(bdev_get_queue(bdev));
+	return queue_logical_block_size(bdev_get_queue(bdev));
 }
 
 static inline int queue_dma_alignment(struct request_queue *q)