path: root/include/linux/blkdev.h
author		Martin K. Petersen <martin.petersen@oracle.com>	2009-05-22 17:17:53 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-05-22 17:22:55 -0400
commit		c72758f33784e5e2a1a4bb9421ef3e6de8f9fcf3 (patch)
tree		a83f7540cc894caafe74db911cba3998d6a9a164 /include/linux/blkdev.h
parent		cd43e26f071524647e660706b784ebcbefbd2e44 (diff)
block: Export I/O topology for block devices and partitions
To support devices with physical block sizes bigger than 512 bytes we need to ensure proper alignment. This patch adds support for exposing I/O topology characteristics as devices are stacked.

logical_block_size is the smallest unit the device can address.

physical_block_size indicates the smallest I/O the device can write without incurring a read-modify-write penalty.

The io_min parameter is the smallest preferred I/O size reported by the device. In many cases this is the same as the physical block size. However, the io_min parameter can be scaled up when stacking (RAID5 chunk size > physical block size).

The io_opt characteristic indicates the optimal I/O size reported by the device. This is usually the stripe width for arrays.

The alignment_offset parameter indicates the number of bytes the start of the device/partition is offset from the device's natural alignment. Partition tools and MD/DM utilities can use this to pad their offsets so filesystems start on proper boundaries.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
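As a rough, hypothetical illustration of how a low-level driver might report these characteristics with the setters added below (the 4 KiB-physical / 512-byte-logical device and the chosen values are made up for this sketch, not part of the patch):

	/* Hypothetical driver with 4 KiB physical sectors that exposes
	 * 512-byte logical blocks; all values are illustrative only. */
	static void example_set_topology(struct request_queue *q)
	{
		blk_queue_logical_block_size(q, 512);    /* smallest addressable unit */
		blk_queue_physical_block_size(q, 4096);  /* smallest write without read-modify-write */
		blk_queue_io_min(q, 4096);               /* preferred minimum I/O size */
		blk_queue_io_opt(q, 0);                  /* no optimal I/O size reported */
		blk_queue_alignment_offset(q, 0);        /* LBA 0 is naturally aligned */
	}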
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	47
1 file changed, 47 insertions, 0 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b7bb6fdba12c..5e740a135e73 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -314,11 +314,16 @@ struct queue_limits {
 	unsigned int max_hw_sectors;
 	unsigned int max_sectors;
 	unsigned int max_segment_size;
+	unsigned int physical_block_size;
+	unsigned int alignment_offset;
+	unsigned int io_min;
+	unsigned int io_opt;
 
 	unsigned short logical_block_size;
 	unsigned short max_hw_segments;
 	unsigned short max_phys_segments;
 
+	unsigned char misaligned;
 	unsigned char no_cluster;
 };
 
@@ -911,6 +916,15 @@ extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_alignment_offset(struct request_queue *q,
+				       unsigned int alignment);
+extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
+extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
+extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+			    sector_t offset);
+extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
+			      sector_t offset);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
 extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
@@ -1047,6 +1061,39 @@ static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
 	return queue_logical_block_size(bdev_get_queue(bdev));
 }
 
+static inline unsigned int queue_physical_block_size(struct request_queue *q)
+{
+	return q->limits.physical_block_size;
+}
+
+static inline unsigned int queue_io_min(struct request_queue *q)
+{
+	return q->limits.io_min;
+}
+
+static inline unsigned int queue_io_opt(struct request_queue *q)
+{
+	return q->limits.io_opt;
+}
+
+static inline int queue_alignment_offset(struct request_queue *q)
+{
+	if (q && q->limits.misaligned)
+		return -1;
+
+	if (q && q->limits.alignment_offset)
+		return q->limits.alignment_offset;
+
+	return 0;
+}
+
+static inline int queue_sector_alignment_offset(struct request_queue *q,
+						sector_t sector)
+{
+	return ((sector << 9) - q->limits.alignment_offset)
+		& (q->limits.io_min - 1);
+}
+
 static inline int queue_dma_alignment(struct request_queue *q)
 {
 	return q ? q->dma_alignment : 511;
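For completeness, a hypothetical in-kernel consumer could use the accessors added above to decide whether I/O starting at a given sector lands on an io_min boundary. This sketch assumes io_min has been set to a non-zero power of two and is not part of the patch:

	/* Hypothetical helper: true if 'sector' starts on an io_min-aligned
	 * boundary after accounting for the device's alignment_offset. */
	static bool example_sector_is_aligned(struct request_queue *q, sector_t sector)
	{
		if (queue_alignment_offset(q) < 0)	/* device is known to be misaligned */
			return false;

		return queue_sector_alignment_offset(q, sector) == 0;
	}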