 block/blk-lib.c        | 34 ++++++++++++++++++++++++++----------
 include/linux/blkdev.h | 10 ++++++++++
 2 files changed, 34 insertions(+), 10 deletions(-)
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 16b06f62e68c..19cc761cacb2 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -44,7 +44,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = REQ_WRITE | REQ_DISCARD;
 	unsigned int max_discard_sectors;
-	unsigned int granularity;
+	unsigned int granularity, alignment, mask;
 	struct bio_batch bb;
 	struct bio *bio;
 	int ret = 0;
@@ -57,10 +57,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
 	/* Zero-sector (unknown) and one-sector granularities are the same. */
 	granularity = max(q->limits.discard_granularity >> 9, 1U);
+	mask = granularity - 1;
+	alignment = (bdev_discard_alignment(bdev) >> 9) & mask;
 
 	/*
 	 * Ensure that max_discard_sectors is of the proper
-	 * granularity
+	 * granularity, so that requests stay aligned after a split.
 	 */
 	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
 	max_discard_sectors = round_down(max_discard_sectors, granularity);
@@ -80,25 +82,37 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	bb.wait = &wait;
 
 	while (nr_sects) {
+		unsigned int req_sects;
+		sector_t end_sect;
+
 		bio = bio_alloc(gfp_mask, 1);
 		if (!bio) {
 			ret = -ENOMEM;
 			break;
 		}
 
+		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
+
+		/*
+		 * If splitting a request, and the next starting sector would be
+		 * misaligned, stop the discard at the previous aligned sector.
+		 */
+		end_sect = sector + req_sects;
+		if (req_sects < nr_sects && (end_sect & mask) != alignment) {
+			end_sect =
+				round_down(end_sect - alignment, granularity)
+				+ alignment;
+			req_sects = end_sect - sector;
+		}
+
 		bio->bi_sector = sector;
 		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
 		bio->bi_private = &bb;
 
-		if (nr_sects > max_discard_sectors) {
-			bio->bi_size = max_discard_sectors << 9;
-			nr_sects -= max_discard_sectors;
-			sector += max_discard_sectors;
-		} else {
-			bio->bi_size = nr_sects << 9;
-			nr_sects = 0;
-		}
+		bio->bi_size = req_sects << 9;
+		nr_sects -= req_sects;
+		sector = end_sect;
 
 		atomic_inc(&bb.done);
 		submit_bio(type, bio);
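The splitting rule in the loop above can be checked in isolation. The sketch below is a standalone, illustrative reimplementation (user-space C, hypothetical names, assuming a power-of-two granularity exactly as the mask arithmetic in this patch does; it is not kernel code). It shows how a misaligned first request is shortened so that every later split point lands on an aligned sector.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* round down to a power-of-two granularity, like the kernel's round_down() */
static sector_t round_down_pow2(sector_t x, sector_t granularity)
{
	return x & ~(granularity - 1);
}

/* how many sectors the next discard bio should cover */
static sector_t split_discard(sector_t sector, sector_t nr_sects,
			      unsigned int max_discard_sectors,
			      unsigned int granularity, unsigned int alignment)
{
	unsigned int mask = granularity - 1;
	sector_t req_sects = nr_sects < max_discard_sectors ?
			     nr_sects : max_discard_sectors;
	sector_t end_sect = sector + req_sects;

	/*
	 * If this is not the last bio and its end would be misaligned,
	 * back the end up to the previous aligned sector.
	 */
	if (req_sects < nr_sects && (end_sect & mask) != alignment) {
		end_sect = round_down_pow2(end_sect - alignment, granularity)
			   + alignment;
		req_sects = end_sect - sector;
	}
	return req_sects;
}

int main(void)
{
	/* granularity 8 sectors, alignment 1, discard 100 sectors from LBA 3 */
	sector_t first = split_discard(3, 100, 16, 8, 1);

	/* prints 14: the bio ends at sector 17, which is aligned (17 & 7 == 1) */
	printf("first bio covers %llu sectors\n", (unsigned long long)first);
	return 0;
}

After that first, possibly shortened bio, max_discard_sectors has already been rounded down to the granularity, so every subsequent request of max_discard_sectors sectors keeps the same alignment.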
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4e72a9d48232..281516ae8b4e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1139,6 +1139,16 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
 		& (lim->discard_granularity - 1);
 }
 
+static inline int bdev_discard_alignment(struct block_device *bdev)
+{
+	struct request_queue *q = bdev_get_queue(bdev);
+
+	if (bdev != bdev->bd_contains)
+		return bdev->bd_part->discard_alignment;
+
+	return q->limits.discard_alignment;
+}
+
 static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
 {
 	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
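The new bdev_discard_alignment() helper exists because a partition need not start on a discard_granularity boundary, so the alignment seen through a partition differs from the whole-disk value. A rough, illustrative calculation of that per-partition offset is sketched below (hypothetical names, all quantities in 512-byte sectors, power-of-two granularity assumed; this is not the kernel's exact helper).

/*
 * Illustrative only: the discard alignment seen through a partition,
 * given the whole-disk alignment and the partition's start sector.
 */
static unsigned int part_discard_alignment(unsigned int granularity,
					   unsigned int disk_alignment,
					   unsigned long long part_start)
{
	unsigned int mask = granularity - 1;

	/* offset from the partition start to the next device-aligned chunk */
	return (granularity + disk_alignment - (part_start & mask)) & mask;
}

For example, with a granularity of 8 sectors, whole-disk alignment 0 and a partition starting at sector 3, discards through the partition should begin 5 sectors in (partition sector 5 is disk sector 8); that per-partition value is what blkdev_issue_discard() picks up via bdev_discard_alignment() and folds into its alignment mask.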