about summary refs log tree commit diff stats
path: root/include/linux/blkdev.h
diff options
context:
space:
mode:
author    Martin K. Petersen <martin.petersen@oracle.com>  2009-11-10 05:50:21 -0500
committer Jens Axboe <jens.axboe@oracle.com>  2009-11-10 05:50:21 -0500
commit    86b37281411cf1e9bc0a6b5406c45edb7bd9ea5d (patch)
tree      729db57dd52054af1bc16b4afb131093dfc9d255 /include/linux/blkdev.h
parent    cf7c25cf91f632a3528669fc0876e1fc8355ff9b (diff)
block: Expose discard granularity
While SSDs track block usage on a per-sector basis, RAID arrays often have
allocation blocks that are bigger. Allow the discard granularity and
alignment to be set and teach the topology stacking logic how to handle
them.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h  18
1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 39c601f783a0..1cc02972fbe2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -312,12 +312,15 @@ struct queue_limits {
312 unsigned int io_min; 312 unsigned int io_min;
313 unsigned int io_opt; 313 unsigned int io_opt;
314 unsigned int max_discard_sectors; 314 unsigned int max_discard_sectors;
315 unsigned int discard_granularity;
316 unsigned int discard_alignment;
315 317
316 unsigned short logical_block_size; 318 unsigned short logical_block_size;
317 unsigned short max_hw_segments; 319 unsigned short max_hw_segments;
318 unsigned short max_phys_segments; 320 unsigned short max_phys_segments;
319 321
320 unsigned char misaligned; 322 unsigned char misaligned;
323 unsigned char discard_misaligned;
321 unsigned char no_cluster; 324 unsigned char no_cluster;
322}; 325};
323 326
@@ -1121,6 +1124,21 @@ static inline int bdev_alignment_offset(struct block_device *bdev)
1121 return q->limits.alignment_offset; 1124 return q->limits.alignment_offset;
1122} 1125}
1123 1126
1127static inline int queue_discard_alignment(struct request_queue *q)
1128{
1129 if (q->limits.discard_misaligned)
1130 return -1;
1131
1132 return q->limits.discard_alignment;
1133}
1134
1135static inline int queue_sector_discard_alignment(struct request_queue *q,
1136 sector_t sector)
1137{
1138 return ((sector << 9) - q->limits.discard_alignment)
1139 & (q->limits.discard_granularity - 1);
1140}
1141
1124static inline int queue_dma_alignment(struct request_queue *q) 1142static inline int queue_dma_alignment(struct request_queue *q)
1125{ 1143{
1126 return q ? q->dma_alignment : 511; 1144 return q ? q->dma_alignment : 511;