author     Jens Axboe <axboe@fb.com>    2014-06-05 15:38:39 -0400
committer  Jens Axboe <axboe@fb.com>    2014-06-05 15:38:39 -0400
commit     762380ad9322951cea4ce9d24864265f9c66a916 (patch)
tree       9ec3fe551583dcb9243d02a9728511e99216dcdc
parent     046f153343e33dcad1be7f6249ea6ff1c6fd9b58 (diff)
block: add notion of a chunk size for request merging
Some drivers have different limits on what size a request should optimally be, depending on the offset of the request; this is similar to dividing the device into chunks. Add a setting that allows the driver to inform the block layer of such a chunk size. The block layer will then prevent merging across the chunks.

This is needed to optimally support NVMe with a non-zero stripe size.

Signed-off-by: Jens Axboe <axboe@fb.com>
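The restriction can be pictured with a small, self-contained sketch (an editorial illustration, not code from this patch): with a power-of-2 chunk size in sectors, an I/O crosses a chunk boundary exactly when its first and last sectors land in different chunks, and that is the kind of merge the new limit forbids.

    #include <stdbool.h>

    /* Illustration only: does [sector, sector + nr_sectors) cross a chunk
     * boundary? chunk_sectors must be a power of two, as the patch requires. */
    static bool crosses_chunk(unsigned long long sector, unsigned int nr_sectors,
                              unsigned int chunk_sectors)
    {
            unsigned long long mask = ~((unsigned long long)chunk_sectors - 1ULL);

            return (sector & mask) != ((sector + nr_sectors - 1) & mask);
    }

    /* e.g. with chunk_sectors = 128: sectors 0..127 stay in one chunk,
     * while sectors 100..163 cross and would not be merged into one request. */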
-rw-r--r--  block/bio.c              3
-rw-r--r--  block/blk-settings.c    18
-rw-r--r--  include/linux/blkdev.h  22
3 files changed, 41 insertions(+), 2 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 96d28eee8a1e..97e832cc9b9c 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -849,7 +849,8 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 		 unsigned int offset)
 {
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
+
+	return __bio_add_page(q, bio, page, len, offset, blk_max_size_offset(q, bio->bi_iter.bi_sector));
 }
 EXPORT_SYMBOL(bio_add_page);
 
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 5d21239bc859..a2b9cb195e70 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -113,6 +113,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+	lim->chunk_sectors = 0;
 	lim->max_write_same_sectors = 0;
 	lim->max_discard_sectors = 0;
 	lim->discard_granularity = 0;
@@ -277,6 +278,23 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
 /**
+ * blk_queue_chunk_sectors - set size of the chunk for this queue
+ * @q:  the request queue for the device
+ * @chunk_sectors:  chunk sectors in the usual 512b unit
+ *
+ * Description:
+ *    If a driver doesn't want IOs to cross a given chunk size, it can set
+ *    this limit and prevent merging across chunks. Note that the chunk size
+ *    must currently be a power-of-2 in sectors.
+ **/
+void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
+{
+	BUG_ON(!is_power_of_2(chunk_sectors));
+	q->limits.chunk_sectors = chunk_sectors;
+}
+EXPORT_SYMBOL(blk_queue_chunk_sectors);
+
+/**
  * blk_queue_max_discard_sectors - set max sectors for a single discard
  * @q:  the request queue for the device
  * @max_discard_sectors: maximum number of sectors to discard
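A driver would typically call the new helper while configuring its queue limits. A minimal sketch under assumptions (the mydrv_* names and the stripe_size field are hypothetical; blk_queue_max_hw_sectors(), is_power_of_2() and the blk_queue_chunk_sectors() added above are the real kernel interfaces):

    #include <linux/blkdev.h>
    #include <linux/log2.h>

    struct mydrv_dev {
            unsigned int max_hw_sectors;    /* hardware transfer limit, in sectors */
            unsigned int stripe_size;       /* device stripe size in bytes, 0 if none */
    };

    static void mydrv_setup_queue_limits(struct mydrv_dev *dev, struct request_queue *q)
    {
            blk_queue_max_hw_sectors(q, dev->max_hw_sectors);

            /*
             * Convert the stripe size to 512 B sectors. It must be a power of
             * two, otherwise the BUG_ON() in blk_queue_chunk_sectors() fires.
             */
            if (dev->stripe_size && is_power_of_2(dev->stripe_size >> 9))
                    blk_queue_chunk_sectors(q, dev->stripe_size >> 9);
    }

With the limit set, bio building and request merging stop at each stripe boundary instead of letting a single I/O span two stripes.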
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3cd426e971db..dc2c703f05fd 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -280,6 +280,7 @@ struct queue_limits {
 	unsigned long		seg_boundary_mask;
 
 	unsigned int		max_hw_sectors;
+	unsigned int		chunk_sectors;
 	unsigned int		max_sectors;
 	unsigned int		max_segment_size;
 	unsigned int		physical_block_size;
@@ -910,6 +911,20 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
 	return q->limits.max_sectors;
 }
 
+/*
+ * Return maximum size of a request at given offset. Only valid for
+ * file system requests.
+ */
+static inline unsigned int blk_max_size_offset(struct request_queue *q,
+					       sector_t offset)
+{
+	if (!q->limits.chunk_sectors)
+		return q->limits.max_hw_sectors;
+
+	return q->limits.chunk_sectors -
+			(offset & (q->limits.chunk_sectors - 1));
+}
+
 static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
 {
 	struct request_queue *q = rq->q;
@@ -917,7 +932,11 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
 	if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
 		return q->limits.max_hw_sectors;
 
-	return blk_queue_get_max_sectors(q, rq->cmd_flags);
+	if (!q->limits.chunk_sectors)
+		return blk_queue_get_max_sectors(q, rq->cmd_flags);
+
+	return min(blk_max_size_offset(q, blk_rq_pos(rq)),
+			blk_queue_get_max_sectors(q, rq->cmd_flags));
 }
 
 static inline unsigned int blk_rq_count_bios(struct request *rq)
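To see the arithmetic the two hunks above implement, here is a small userspace mock with concrete numbers (a sketch only; max_size_at_offset() is a stand-in for blk_max_size_offset() plus the min() applied in blk_rq_get_max_sectors(), not kernel code):

    #include <stdio.h>

    static unsigned int max_size_at_offset(unsigned int chunk_sectors,
                                           unsigned long long offset,
                                           unsigned int max_sectors)
    {
            unsigned int left;

            if (!chunk_sectors)             /* no chunking configured */
                    return max_sectors;

            /* sectors left before the next chunk boundary (chunk is a power of 2) */
            left = chunk_sectors - (unsigned int)(offset & (chunk_sectors - 1));
            return left < max_sectors ? left : max_sectors;
    }

    int main(void)
    {
            /* chunk of 128 sectors (64 KiB), queue limit of 1024 sectors */
            printf("%u\n", max_size_at_offset(128, 200, 1024));  /* 56: capped at the boundary */
            printf("%u\n", max_size_at_offset(128, 256, 1024));  /* 128: a full, aligned chunk */
            printf("%u\n", max_size_at_offset(0, 200, 1024));    /* 1024: chunking disabled */
            return 0;
    }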
@@ -983,6 +1002,7 @@ extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
 extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,