Diffstat (limited to 'block/blk-lib.c')
 block/blk-lib.c | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9373b58dfab1..b3a1f2b70b31 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -43,11 +43,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = REQ_WRITE | REQ_DISCARD;
-	unsigned int max_discard_sectors;
-	unsigned int granularity, alignment, mask;
+	sector_t max_discard_sectors;
+	sector_t granularity, alignment;
 	struct bio_batch bb;
 	struct bio *bio;
 	int ret = 0;
+	struct blk_plug plug;
 
 	if (!q)
 		return -ENXIO;
@@ -57,15 +58,16 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
 	/* Zero-sector (unknown) and one-sector granularities are the same. */
 	granularity = max(q->limits.discard_granularity >> 9, 1U);
-	mask = granularity - 1;
-	alignment = (bdev_discard_alignment(bdev) >> 9) & mask;
+	alignment = bdev_discard_alignment(bdev) >> 9;
+	alignment = sector_div(alignment, granularity);
 
 	/*
 	 * Ensure that max_discard_sectors is of the proper
 	 * granularity, so that requests stay aligned after a split.
 	 */
 	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-	max_discard_sectors = round_down(max_discard_sectors, granularity);
+	sector_div(max_discard_sectors, granularity);
+	max_discard_sectors *= granularity;
 	if (unlikely(!max_discard_sectors)) {
 		/* Avoid infinite loop below. Being cautious never hurts. */
 		return -EOPNOTSUPP;
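
The two sector_div() conversions above are the heart of the patch: round_down() and the mask test expand to bitwise AND and are only correct when the granularity is a power of two, while sector_div() performs a real division (via the 64-by-32 helper on 32-bit kernels, hence the sector_t types). Note that sector_div(n, b) divides n in place and returns the remainder. A minimal userspace sketch of the rounding, with an illustrative non-power-of-two granularity and plain 64-bit division standing in for sector_div():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t granularity = 3;		/* illustrative: not a power of 2 */
	uint64_t max_discard_sectors = 65528;	/* illustrative queue limit */

	/* Round down to a multiple of granularity, as the patched code does. */
	max_discard_sectors -= max_discard_sectors % granularity;
	printf("%llu\n", (unsigned long long)max_discard_sectors);	/* 65526 */

	/* The old round_down(x, granularity) expands to x & ~(granularity - 1),
	 * which is only correct for a power-of-two granularity; with 3 it
	 * would compute 65528 & ~2 and leave the misaligned 65528. */
	return 0;
}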
@@ -81,9 +83,10 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	bb.flags = 1 << BIO_UPTODATE;
 	bb.wait = &wait;
 
+	blk_start_plug(&plug);
 	while (nr_sects) {
 		unsigned int req_sects;
-		sector_t end_sect;
+		sector_t end_sect, tmp;
 
 		bio = bio_alloc(gfp_mask, 1);
 		if (!bio) {
@@ -98,10 +101,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		 * misaligned, stop the discard at the previous aligned sector.
 		 */
 		end_sect = sector + req_sects;
-		if (req_sects < nr_sects && (end_sect & mask) != alignment) {
-			end_sect =
-				round_down(end_sect - alignment, granularity)
-				+ alignment;
+		tmp = end_sect;
+		if (req_sects < nr_sects &&
+		    sector_div(tmp, granularity) != alignment) {
+			end_sect = end_sect - alignment;
+			sector_div(end_sect, granularity);
+			end_sect = end_sect * granularity + alignment;
 			req_sects = end_sect - sector;
 		}
 
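One subtlety in the hunk above: sector_div() overwrites its first argument with the quotient, which is why end_sect is copied into tmp before the alignment test. The patched arithmetic then pulls the split point back to the largest end_sect' <= end_sect satisfying end_sect' % granularity == alignment. A standalone sketch with illustrative values (granularity 3, alignment 1, plain 64-bit division in place of sector_div()):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t granularity = 3, alignment = 1;
	uint64_t sector = 1, nr_sects = 100;
	uint64_t req_sects = 10;		/* candidate split size */
	uint64_t end_sect = sector + req_sects;	/* 11: misaligned, 11 % 3 != 1 */

	if (req_sects < nr_sects && end_sect % granularity != alignment) {
		/* Largest end <= end_sect with end % granularity == alignment. */
		end_sect = (end_sect - alignment) / granularity * granularity
				+ alignment;		/* 10 */
		req_sects = end_sect - sector;		/* 9 */
	}
	printf("end_sect=%llu req_sects=%llu\n",
	       (unsigned long long)end_sect, (unsigned long long)req_sects);
	return 0;
}

The next chunk then starts at sector 10, and 10 % 3 == 1 == alignment, so every request after a split stays on the device's discard alignment.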
@@ -117,6 +122,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		atomic_inc(&bb.done);
 		submit_bio(type, bio);
 	}
+	blk_finish_plug(&plug);
 
 	/* Wait for bios in-flight */
 	if (!atomic_dec_and_test(&bb.done))
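
The blk_finish_plug() call above pairs with the blk_start_plug() added before the loop: the discard bios queue up on the per-task plug list and are handed to the driver in one batch when the plug is flushed, instead of being dispatched one at a time. The pairing in isolation, as a hypothetical helper that is not part of the patch:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Sketch: submit a batch of already-built bios under a single plug so
 * the block layer can merge and dispatch them together. */
static void submit_bio_batch(struct bio **bios, int n, int rw)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < n; i++)
		submit_bio(rw, bios[i]);
	blk_finish_plug(&plug);	/* flush the plugged bios to the driver */
}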