path: root/include
author	Christoph Hellwig <hch@infradead.org>	2009-09-30 07:54:20 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-10-01 15:15:46 -0400
commit	ca80650cfbde5b17a5fa957a261c7973f84599a7 (patch)
tree	d98ad032d8c701084539d5673c2d7d7ca5962a3f /include
parent	1122a26f2abe4245ccdaed95ec23f63fe086b332 (diff)
block: allow large discard requests
Currently we set the bio size to the byte equivalent of the blocks to
be trimmed when submitting the initial DISCARD ioctl. That means it
is subject to the max_hw_sectors limitation of the HBA which is
much lower than the size of a DISCARD request we can support.
Add a separate max_discard_sectors tunable to limit the size for discard
requests.

We limit the max discard request size in bytes to 32bit as that is the
limit for bio->bi_size. This could be much larger if we had a way to pass
that information through the block layer.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
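The clamping the message refers to is done outside this header, so the expression below is only a minimal sketch of the idea, not code quoted from the patch; the variable and queue names are assumptions for illustration.

	/* Illustrative clamp, not part of this header diff: bio->bi_size is an
	 * unsigned int, so one discard bio can describe at most UINT_MAX bytes.
	 * Shifting by 9 converts that byte ceiling into 512-byte sectors before
	 * comparing it with the queue's advertised max_discard_sectors. */
	unsigned int max_discard_sectors =
		min_t(unsigned int, q->limits.max_discard_sectors, UINT_MAX >> 9);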
Diffstat (limited to 'include')
-rw-r--r--	include/linux/blkdev.h	3
1 files changed, 3 insertions, 0 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f62d45e87618..1a03b715dfad 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -311,6 +311,7 @@ struct queue_limits {
 	unsigned int		alignment_offset;
 	unsigned int		io_min;
 	unsigned int		io_opt;
+	unsigned int		max_discard_sectors;
 
 	unsigned short		logical_block_size;
 	unsigned short		max_hw_segments;
@@ -928,6 +929,8 @@ extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
+extern void blk_queue_max_discard_sectors(struct request_queue *q,
+		unsigned int max_discard_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_alignment_offset(struct request_queue *q,
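As a usage sketch: only blk_queue_max_discard_sectors() and its prototype above come from this patch; the driver function name and the 65536-sector value below are illustrative assumptions.

	/* Hypothetical driver code, not part of this patch: advertise how large
	 * a single DISCARD request the device can handle when the queue is set up. */
	static void example_init_discard_limits(struct request_queue *q)
	{
		/* e.g. cap each discard at 65536 sectors (32 MiB of 512-byte
		 * sectors); the right value is device specific. */
		blk_queue_max_discard_sectors(q, 65536);
	}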