author    | Ming Lei <ming.lei@redhat.com> | 2018-10-12 03:53:10 -0400
committer | Jens Axboe <axboe@kernel.dk> | 2018-10-18 09:23:40 -0400
commit    | 744889b7cbb56a64f957e65ade7cb65fe3f35714 (patch)
tree      | 65972a4045c33b737862a4522271ec5705f41e2c
parent    | 7a7080b53467ad96c0b371e9d2a2a312f96f99c2 (diff)
block: don't deal with discard limit in blkdev_issue_discard()
blk_queue_split() already respects this limit via bio splitting, so
there is no need to handle it in blkdev_issue_discard(); this aligns
the discard path with normal bio submission (bio_add_page() &
submit_bio()).

More importantly, this patch fixes an issue introduced by commit
a22c4d7e34402cc ("block: re-add discard_granularity and alignment
checks"), in which a zero-length discard bio may be generated when the
alignment is zero.
Fixes: a22c4d7e34402ccdf3 ("block: re-add discard_granularity and alignment checks")
Cc: stable@vger.kernel.org
Cc: Ming Lin <ming.l@ssi.samsung.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Xiao Ni <xni@redhat.com>
Tested-by: Mariusz Dabrowski <mariusz.dabrowski@intel.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
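
To see how the removed code could produce a zero-length discard, here is
a minimal user-space sketch of the rounding logic that this patch drops
(all values in 512-byte sectors). It is an illustration only, not kernel
code: the device limits in main() are hypothetical, the function name
old_req_sects() is made up for the example, and the kernel's
sector_div() is modeled with plain '/' and '%'.

#include <stdio.h>

/* Model of the pre-patch chunking/rounding in __blkdev_issue_discard(). */
static unsigned int old_req_sects(unsigned long long sector,
                                  unsigned long long nr_sects,
                                  unsigned int max_discard_sectors,
                                  unsigned int granularity,
                                  unsigned int alignment)
{
        unsigned long long end_sect;
        unsigned int req_sects;

        /* Issue in chunks of the user-defined max discard setting. */
        req_sects = nr_sects < max_discard_sectors ?
                        nr_sects : max_discard_sectors;
        end_sect = sector + req_sects;

        if (req_sects < nr_sects && end_sect % granularity != alignment) {
                /* Stop the discard at the previous aligned sector. */
                end_sect = ((end_sect - alignment) / granularity) *
                                granularity + alignment;
                req_sects = end_sect - sector;
        }
        return req_sects;
}

int main(void)
{
        /*
         * Hypothetical queue limits: discard granularity of 1024
         * sectors but max_discard_sectors of only 1023, alignment 0.
         * end_sect (1023) is rounded down to 0, so req_sects becomes
         * 0: a zero-length discard bio, and the loop never advances.
         */
        printf("req_sects = %u\n", old_req_sects(0, 4096, 1023, 1024, 0));
        return 0;
}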
-rw-r--r-- | block/blk-lib.c | 28
1 file changed, 2 insertions(+), 26 deletions(-)
diff --git a/block/blk-lib.c b/block/blk-lib.c
index d1b9dd03da25..bbd44666f2b5 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -29,9 +29,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 {
 	struct request_queue *q = bdev_get_queue(bdev);
 	struct bio *bio = *biop;
-	unsigned int granularity;
 	unsigned int op;
-	int alignment;
 	sector_t bs_mask;
 
 	if (!q)
@@ -54,38 +52,16 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	if ((sector | nr_sects) & bs_mask)
 		return -EINVAL;
 
-	/* Zero-sector (unknown) and one-sector granularities are the same. */
-	granularity = max(q->limits.discard_granularity >> 9, 1U);
-	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
-
 	while (nr_sects) {
-		unsigned int req_sects;
-		sector_t end_sect, tmp;
+		unsigned int req_sects = nr_sects;
+		sector_t end_sect;
 
-		/*
-		 * Issue in chunks of the user defined max discard setting,
-		 * ensuring that bi_size doesn't overflow
-		 */
-		req_sects = min_t(sector_t, nr_sects,
-				q->limits.max_discard_sectors);
 		if (!req_sects)
 			goto fail;
 		if (req_sects > UINT_MAX >> 9)
 			req_sects = UINT_MAX >> 9;
 
-		/*
-		 * If splitting a request, and the next starting sector would be
-		 * misaligned, stop the discard at the previous aligned sector.
-		 */
 		end_sect = sector + req_sects;
-		tmp = end_sect;
-		if (req_sects < nr_sects &&
-		    sector_div(tmp, granularity) != alignment) {
-			end_sect = end_sect - alignment;
-			sector_div(end_sect, granularity);
-			end_sect = end_sect * granularity + alignment;
-			req_sects = end_sect - sector;
-		}
-
 		bio = next_bio(bio, 0, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
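
Callers of the exported wrapper are unaffected by this change. As a
rough sketch (the helper below and its name are hypothetical, not part
of the patch), discard requests are still issued the same way; what
changes is that the bios built here are now split against the queue's
discard limits by blk_queue_split() at submission time rather than by
hand in blkdev_issue_discard():

#include <linux/blkdev.h>

/* Hypothetical caller: discard a byte range on a block device. */
static int discard_byte_range(struct block_device *bdev, u64 start, u64 len)
{
	/* Convert bytes to 512-byte sectors for the block layer. */
	return blkdev_issue_discard(bdev, start >> 9, len >> 9,
				    GFP_KERNEL, 0);
}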