Diffstat (limited to 'block/blk-lib.c')
 block/blk-lib.c | 41 ++++++++++++++++++++++++++++-------------
 1 file changed, 28 insertions, 13 deletions
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 2b461b496a78..19cc761cacb2 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -44,6 +44,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
         struct request_queue *q = bdev_get_queue(bdev);
         int type = REQ_WRITE | REQ_DISCARD;
         unsigned int max_discard_sectors;
+        unsigned int granularity, alignment, mask;
         struct bio_batch bb;
         struct bio *bio;
         int ret = 0;
@@ -54,18 +55,20 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
         if (!blk_queue_discard(q))
                 return -EOPNOTSUPP;
 
+        /* Zero-sector (unknown) and one-sector granularities are the same. */
+        granularity = max(q->limits.discard_granularity >> 9, 1U);
+        mask = granularity - 1;
+        alignment = (bdev_discard_alignment(bdev) >> 9) & mask;
+
         /*
          * Ensure that max_discard_sectors is of the proper
-         * granularity
+         * granularity, so that requests stay aligned after a split.
          */
         max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+        max_discard_sectors = round_down(max_discard_sectors, granularity);
         if (unlikely(!max_discard_sectors)) {
                 /* Avoid infinite loop below. Being cautious never hurts. */
                 return -EOPNOTSUPP;
-        } else if (q->limits.discard_granularity) {
-                unsigned int disc_sects = q->limits.discard_granularity >> 9;
-
-                max_discard_sectors &= ~(disc_sects - 1);
         }
 
         if (flags & BLKDEV_DISCARD_SECURE) {
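
As a side note, the arithmetic set up in the hunk above can be illustrated with a minimal userspace sketch. The queue limit values below are invented for illustration (in the kernel they come from q->limits and bdev_discard_alignment()), and round_down()/max() are local stand-ins for the kernel macros; like the patch, the sketch assumes a power-of-two discard granularity.

/*
 * Userspace sketch (not kernel code) of the setup in the hunk above:
 * derive the granularity in sectors, the alignment offset within a
 * granule, and trim max_discard_sectors to whole granules.
 */
#include <stdio.h>

#define round_down(x, y) ((x) & ~((unsigned int)(y) - 1))
#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
        /* Hypothetical queue limits: bytes for the first two, sectors for the last. */
        unsigned int discard_granularity = 4096;
        unsigned int discard_alignment   = 512;
        unsigned int max_discard_sectors = 65535;

        /* Zero-sector (unknown) and one-sector granularities are the same. */
        unsigned int granularity = max(discard_granularity >> 9, 1U);
        unsigned int mask = granularity - 1;
        unsigned int alignment = (discard_alignment >> 9) & mask;

        /* Keep whole granules per bio so every split stays aligned. */
        max_discard_sectors = round_down(max_discard_sectors, granularity);

        printf("granularity=%u sectors, mask=0x%x, alignment=%u sectors\n",
               granularity, mask, alignment);
        printf("max_discard_sectors rounded down to %u\n", max_discard_sectors);
        return 0;
}

With these example limits the sketch reports an 8-sector granularity, a 1-sector alignment offset, and max_discard_sectors trimmed from 65535 to 65528.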
@@ -79,25 +82,37 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
         bb.wait = &wait;
 
         while (nr_sects) {
+                unsigned int req_sects;
+                sector_t end_sect;
+
                 bio = bio_alloc(gfp_mask, 1);
                 if (!bio) {
                         ret = -ENOMEM;
                         break;
                 }
 
+                req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
+
+                /*
+                 * If splitting a request, and the next starting sector would be
+                 * misaligned, stop the discard at the previous aligned sector.
+                 */
+                end_sect = sector + req_sects;
+                if (req_sects < nr_sects && (end_sect & mask) != alignment) {
+                        end_sect =
+                                round_down(end_sect - alignment, granularity)
+                                + alignment;
+                        req_sects = end_sect - sector;
+                }
+
                 bio->bi_sector = sector;
                 bio->bi_end_io = bio_batch_end_io;
                 bio->bi_bdev = bdev;
                 bio->bi_private = &bb;
 
-                if (nr_sects > max_discard_sectors) {
-                        bio->bi_size = max_discard_sectors << 9;
-                        nr_sects -= max_discard_sectors;
-                        sector += max_discard_sectors;
-                } else {
-                        bio->bi_size = nr_sects << 9;
-                        nr_sects = 0;
-                }
+                bio->bi_size = req_sects << 9;
+                nr_sects -= req_sects;
+                sector = end_sect;
 
                 atomic_inc(&bb.done);
                 submit_bio(type, bio);
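
Similarly, the new splitting loop can be exercised outside the kernel with made-up numbers. The sketch below only prints the (start, length) pairs the patched loop would hand to individual discard bios; the starting sector, length and limits are hypothetical, and the types and min_t()/round_down() helpers are local approximations of the kernel ones.

/*
 * Userspace sketch (not kernel code) of the splitting loop above.
 * It shows that whenever a request is split, the split is trimmed so
 * the next request starts at a granule boundary plus the alignment.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

#define round_down(x, y) ((x) & ~((sector_t)(y) - 1))
#define min_t(t, a, b) ((t)(a) < (t)(b) ? (t)(a) : (t)(b))

int main(void)
{
        sector_t sector = 3;                   /* hypothetical, misaligned start */
        sector_t nr_sects = 100;               /* hypothetical length */
        unsigned int granularity = 8;          /* sectors */
        unsigned int mask = granularity - 1;
        unsigned int alignment = 1;            /* sectors */
        unsigned int max_discard_sectors = 24; /* already a multiple of granularity */

        while (nr_sects) {
                sector_t req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
                sector_t end_sect = sector + req_sects;

                /*
                 * If splitting, and the next starting sector would be
                 * misaligned, stop this discard at the previous aligned sector.
                 */
                if (req_sects < nr_sects && (end_sect & mask) != alignment) {
                        end_sect = round_down(end_sect - alignment, granularity)
                                   + alignment;
                        req_sects = end_sect - sector;
                }

                printf("discard %llu sectors at %llu\n",
                       (unsigned long long)req_sects,
                       (unsigned long long)sector);

                nr_sects -= req_sects;
                sector = end_sect;
        }
        return 0;
}

With the values shown it issues discards of 22, 24, 24, 24 and 6 sectors starting at 3, 25, 49, 73 and 97, so every request after the first begins one sector past an 8-sector boundary, matching the 1-sector alignment offset.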