author	Paolo Bonzini <pbonzini@redhat.com>	2012-08-02 03:48:50 -0400
committer	Jens Axboe <axboe@kernel.dk>	2012-08-02 03:48:50 -0400
commit	c6e666345e1b79c62ba82339cc7d55a89cb73f88 (patch)
tree	9ffc17b7010ba16a18482d5f9057374dae90b669 /block/blk-lib.c
parent	f6ff53d3611b564661896be23369b54d84941a0e (diff)
block: split discard into aligned requests
When a disk has a large discard_granularity and a small max_discard_sectors, discards are not split with optimal alignment. In the limit case of discard_granularity == max_discard_sectors, no request could be aligned correctly, so in fact you might end up with no discarded logical blocks at all.

Another example that helps show the condition in the patch is discard_granularity == 64, max_discard_sectors == 128. A request that is submitted for 256 sectors 2..257 will be split in two: 2..129, 130..257. However, only 2 aligned blocks out of 3 are included in the request; 128..191 may be left intact and not discarded. With this patch, the first request will be truncated to ensure good alignment of what's left, and the split will be 2..127, 128..255, 256..257.

The patch will also take into account the discard_alignment.

At most one extra request will be introduced, because the first request will be reduced by at most granularity-1 sectors, and granularity must be less than max_discard_sectors. Subsequent requests will run on round_down(max_discard_sectors, granularity) sectors, as in the current code.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Tested-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
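For illustration, here is a minimal userspace sketch (not kernel code) of the splitting arithmetic the patch introduces. It assumes a power-of-two granularity, as the mask computation in the patch implies, and a discard_alignment of zero; round_down(x, g) is open-coded as x & ~(g - 1). Compiled and run with the values from the example above, it reproduces the 2..127, 128..255, 256..257 split:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long sector = 2, nr_sects = 256;
		unsigned long long granularity = 64;        /* assumed power of two */
		unsigned long long max_discard_sectors = 128;
		unsigned long long mask = granularity - 1;
		unsigned long long alignment = 0;           /* assumed discard_alignment of 0 */

		while (nr_sects) {
			unsigned long long req_sects =
				nr_sects < max_discard_sectors ? nr_sects : max_discard_sectors;
			unsigned long long end_sect = sector + req_sects;

			/* If splitting, and the next starting sector would be
			 * misaligned, trim back to the previous aligned sector. */
			if (req_sects < nr_sects && (end_sect & mask) != alignment) {
				end_sect = ((end_sect - alignment) & ~mask) + alignment;
				req_sects = end_sect - sector;
			}

			printf("discard sectors %llu..%llu\n", sector, end_sect - 1);
			nr_sects -= req_sects;
			sector = end_sect;
		}
		return 0;
	}

The sketch only models how end_sect is pulled back to the previous aligned boundary; in the kernel the same trimming happens per-bio inside blkdev_issue_discard(), as the diff below shows.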
Diffstat (limited to 'block/blk-lib.c')
-rw-r--r--	block/blk-lib.c	34
1 file changed, 24 insertions(+), 10 deletions(-)
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 16b06f62e68c..19cc761cacb2 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -44,7 +44,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = REQ_WRITE | REQ_DISCARD;
 	unsigned int max_discard_sectors;
-	unsigned int granularity;
+	unsigned int granularity, alignment, mask;
 	struct bio_batch bb;
 	struct bio *bio;
 	int ret = 0;
@@ -57,10 +57,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
 	/* Zero-sector (unknown) and one-sector granularities are the same. */
 	granularity = max(q->limits.discard_granularity >> 9, 1U);
+	mask = granularity - 1;
+	alignment = (bdev_discard_alignment(bdev) >> 9) & mask;
 
 	/*
 	 * Ensure that max_discard_sectors is of the proper
-	 * granularity
+	 * granularity, so that requests stay aligned after a split.
 	 */
 	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
 	max_discard_sectors = round_down(max_discard_sectors, granularity);
@@ -80,25 +82,37 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	bb.wait = &wait;
 
 	while (nr_sects) {
+		unsigned int req_sects;
+		sector_t end_sect;
+
 		bio = bio_alloc(gfp_mask, 1);
 		if (!bio) {
 			ret = -ENOMEM;
 			break;
 		}
 
+		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
+
+		/*
+		 * If splitting a request, and the next starting sector would be
+		 * misaligned, stop the discard at the previous aligned sector.
+		 */
+		end_sect = sector + req_sects;
+		if (req_sects < nr_sects && (end_sect & mask) != alignment) {
+			end_sect =
+				round_down(end_sect - alignment, granularity)
+				+ alignment;
+			req_sects = end_sect - sector;
+		}
+
 		bio->bi_sector = sector;
 		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
 		bio->bi_private = &bb;
 
-		if (nr_sects > max_discard_sectors) {
-			bio->bi_size = max_discard_sectors << 9;
-			nr_sects -= max_discard_sectors;
-			sector += max_discard_sectors;
-		} else {
-			bio->bi_size = nr_sects << 9;
-			nr_sects = 0;
-		}
+		bio->bi_size = req_sects << 9;
+		nr_sects -= req_sects;
+		sector = end_sect;
 
 		atomic_inc(&bb.done);
 		submit_bio(type, bio);