author    Christoph Hellwig <hch@infradead.org>    2009-09-30 07:54:20 -0400
committer Jens Axboe <jens.axboe@oracle.com>       2009-10-01 15:19:34 -0400
commit    67efc9258010da35b27b3854d0880c7e193004ed (patch)
tree      d98ad032d8c701084539d5673c2d7d7ca5962a3f
parent    c15227de132f1295f3db6b7df9079956b1020fd8 (diff)
block: allow large discard requests
Currently we set the bio size to the byte equivalent of the blocks to be
trimmed when submitting the initial DISCARD ioctl.  That means it is
subject to the max_hw_sectors limitation of the HBA, which is much lower
than the size of a DISCARD request we can support.  Add a separate
max_discard_sectors tunable to limit the size of discard requests.

We limit the max discard request size in bytes to 32bit as that is the
limit for bio->bi_size.  This could be much larger if we had a way to
pass that information through the block layer.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r--  block/blk-barrier.c      10
-rw-r--r--  block/blk-core.c          3
-rw-r--r--  block/blk-settings.c     13
-rw-r--r--  include/linux/blkdev.h    3

4 files changed, 24 insertions, 5 deletions
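For illustration, the small standalone C program below (not part of the patch; the device limit and range size are made-up numbers) walks a large trim range the same way the patched blkdev_issue_discard() does: each chunk is capped at min(max_discard_sectors, UINT_MAX >> 9) sectors so that chunk << 9 still fits the 32-bit bio->bi_size field.

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

/* Made-up example values: trim a 1 TiB range (2^31 sectors of 512 bytes)
 * on a device whose driver advertises 8 MiB (16384-sector) discards. */
#define EXAMPLE_MAX_DISCARD_SECTORS	16384u
#define EXAMPLE_NR_SECTORS		(1ull << 31)

int main(void)
{
	uint64_t nr_sects = EXAMPLE_NR_SECTORS;
	uint64_t sector = 0;
	unsigned long bios = 0;
	/* Cap the per-bio chunk so chunk << 9 still fits a 32-bit bi_size,
	 * mirroring min(q->limits.max_discard_sectors, UINT_MAX >> 9). */
	unsigned int max_discard_sectors =
		EXAMPLE_MAX_DISCARD_SECTORS < (UINT_MAX >> 9) ?
		EXAMPLE_MAX_DISCARD_SECTORS : (UINT_MAX >> 9);

	while (nr_sects) {
		uint64_t chunk = nr_sects > max_discard_sectors ?
				 max_discard_sectors : nr_sects;

		/* In the kernel each chunk becomes one discard bio with
		 * bio->bi_size = chunk << 9. */
		sector += chunk;
		nr_sects -= chunk;
		bios++;
	}

	printf("split %llu sectors into %lu discard bios (final sector %llu)\n",
	       (unsigned long long)EXAMPLE_NR_SECTORS, bios,
	       (unsigned long long)sector);
	return 0;
}

With these example values the loop emits 131072 bios of 8 MiB each; before the patch the per-bio size was bounded by queue_max_hw_sectors(q) instead.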
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 21f5025c3945..8873b9b439ff 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -385,6 +385,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
 	while (nr_sects && !ret) {
 		unsigned int sector_size = q->limits.logical_block_size;
+		unsigned int max_discard_sectors =
+			min(q->limits.max_discard_sectors, UINT_MAX >> 9);
 
 		bio = bio_alloc(gfp_mask, 1);
 		if (!bio)
@@ -411,10 +413,10 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		 * touch many more blocks on disk than the actual payload
 		 * length.
 		 */
-		if (nr_sects > queue_max_hw_sectors(q)) {
-			bio->bi_size = queue_max_hw_sectors(q) << 9;
-			nr_sects -= queue_max_hw_sectors(q);
-			sector += queue_max_hw_sectors(q);
+		if (nr_sects > max_discard_sectors) {
+			bio->bi_size = max_discard_sectors << 9;
+			nr_sects -= max_discard_sectors;
+			sector += max_discard_sectors;
 		} else {
 			bio->bi_size = nr_sects << 9;
 			nr_sects = 0;
diff --git a/block/blk-core.c b/block/blk-core.c
index 80a020dd1580..34504f309728 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1436,7 +1436,8 @@ static inline void __generic_make_request(struct bio *bio)
 		goto end_io;
 	}
 
-	if (unlikely(nr_sectors > queue_max_hw_sectors(q))) {
+	if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+		     nr_sectors > queue_max_hw_sectors(q))) {
 		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
 		       bdevname(bio->bi_bdev, b),
 		       bio_sectors(bio),
diff --git a/block/blk-settings.c b/block/blk-settings.c
index d29498ef1eb5..e0695bca7027 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -96,6 +96,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->max_segment_size = MAX_SEGMENT_SIZE;
 	lim->max_sectors = BLK_DEF_MAX_SECTORS;
 	lim->max_hw_sectors = INT_MAX;
+	lim->max_discard_sectors = SAFE_MAX_SECTORS;
 	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
 	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
 	lim->alignment_offset = 0;
@@ -239,6 +240,18 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
 /**
+ * blk_queue_max_discard_sectors - set max sectors for a single discard
+ * @q:  the request queue for the device
+ * @max_discard: maximum number of sectors to discard
+ **/
+void blk_queue_max_discard_sectors(struct request_queue *q,
+		unsigned int max_discard_sectors)
+{
+	q->limits.max_discard_sectors = max_discard_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_discard_sectors);
+
+/**
  * blk_queue_max_phys_segments - set max phys segments for a request for this queue
  * @q:  the request queue for the device
  * @max_segments:  max number of segments
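A driver that can complete large trims would advertise its limit through the new helper. A minimal, hypothetical sketch (the 2 GiB figure and the function name are invented for illustration; only blk_queue_max_discard_sectors() itself comes from the patch):

#include <linux/blkdev.h>

/* Hypothetical driver init path: tell the block layer this queue accepts
 * discard requests of up to 2 GiB (4194304 sectors of 512 bytes), even
 * though max_hw_sectors for regular reads/writes is much smaller. */
static void example_setup_discard_limit(struct request_queue *q)
{
	blk_queue_max_discard_sectors(q, 4194304);
}

blkdev_issue_discard() then clamps this value against UINT_MAX >> 9 before sizing each bio, as shown in the blk-barrier.c hunk above.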
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f62d45e87618..1a03b715dfad 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -311,6 +311,7 @@ struct queue_limits {
 	unsigned int		alignment_offset;
 	unsigned int		io_min;
 	unsigned int		io_opt;
+	unsigned int		max_discard_sectors;
 
 	unsigned short		logical_block_size;
 	unsigned short		max_hw_segments;
@@ -928,6 +929,8 @@ extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
+extern void blk_queue_max_discard_sectors(struct request_queue *q,
+		unsigned int max_discard_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_alignment_offset(struct request_queue *q,