path: root/block
Diffstat (limited to 'block')
-rw-r--r--  block/blk-lib.c   |  41
-rw-r--r--  block/blk-merge.c | 117
-rw-r--r--  block/genhd.c     |   2
3 files changed, 111 insertions(+), 49 deletions(-)
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 2b461b496a78..19cc761cacb2 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -44,6 +44,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
         struct request_queue *q = bdev_get_queue(bdev);
         int type = REQ_WRITE | REQ_DISCARD;
         unsigned int max_discard_sectors;
+        unsigned int granularity, alignment, mask;
         struct bio_batch bb;
         struct bio *bio;
         int ret = 0;
@@ -54,18 +55,20 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
         if (!blk_queue_discard(q))
                 return -EOPNOTSUPP;
 
+        /* Zero-sector (unknown) and one-sector granularities are the same. */
+        granularity = max(q->limits.discard_granularity >> 9, 1U);
+        mask = granularity - 1;
+        alignment = (bdev_discard_alignment(bdev) >> 9) & mask;
+
         /*
          * Ensure that max_discard_sectors is of the proper
-         * granularity
+         * granularity, so that requests stay aligned after a split.
          */
         max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+        max_discard_sectors = round_down(max_discard_sectors, granularity);
         if (unlikely(!max_discard_sectors)) {
                 /* Avoid infinite loop below. Being cautious never hurts. */
                 return -EOPNOTSUPP;
-        } else if (q->limits.discard_granularity) {
-                unsigned int disc_sects = q->limits.discard_granularity >> 9;
-
-                max_discard_sectors &= ~(disc_sects - 1);
         }
 
         if (flags & BLKDEV_DISCARD_SECURE) {
@@ -79,25 +82,37 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
         bb.wait = &wait;
 
         while (nr_sects) {
+                unsigned int req_sects;
+                sector_t end_sect;
+
                 bio = bio_alloc(gfp_mask, 1);
                 if (!bio) {
                         ret = -ENOMEM;
                         break;
                 }
 
+                req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
+
+                /*
+                 * If splitting a request, and the next starting sector would be
+                 * misaligned, stop the discard at the previous aligned sector.
+                 */
+                end_sect = sector + req_sects;
+                if (req_sects < nr_sects && (end_sect & mask) != alignment) {
+                        end_sect =
+                                round_down(end_sect - alignment, granularity)
+                                + alignment;
+                        req_sects = end_sect - sector;
+                }
+
                 bio->bi_sector = sector;
                 bio->bi_end_io = bio_batch_end_io;
                 bio->bi_bdev = bdev;
                 bio->bi_private = &bb;
 
-                if (nr_sects > max_discard_sectors) {
-                        bio->bi_size = max_discard_sectors << 9;
-                        nr_sects -= max_discard_sectors;
-                        sector += max_discard_sectors;
-                } else {
-                        bio->bi_size = nr_sects << 9;
-                        nr_sects = 0;
-                }
+                bio->bi_size = req_sects << 9;
+                nr_sects -= req_sects;
+                sector = end_sect;
 
                 atomic_inc(&bb.done);
                 submit_bio(type, bio);
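
Note: for reference, the splitting rule above can be tried outside the kernel. The user-space sketch below reproduces the arithmetic; every device value in it (starting sector, granularity, alignment, max_discard_sectors) is made up, and it assumes a power-of-two granularity, which is also what the mask arithmetic in the patch assumes.

/* Standalone sketch of the discard-splitting arithmetic above.
 * All numbers are hypothetical. */
#include <stdint.h>
#include <stdio.h>

/* Round x down to a multiple of the power-of-two gran. */
static uint64_t round_down_pow2(uint64_t x, uint64_t gran)
{
        return x & ~(gran - 1);
}

int main(void)
{
        uint64_t sector = 11;                 /* misaligned start (hypothetical) */
        uint64_t nr_sects = 100;              /* total discard length */
        uint64_t granularity = 8;             /* discard granularity, in sectors */
        uint64_t alignment = 2;               /* discard alignment, in sectors */
        uint64_t mask = granularity - 1;
        uint64_t max_discard_sectors = 64;    /* already a multiple of granularity */

        while (nr_sects) {
                uint64_t req_sects = nr_sects < max_discard_sectors ?
                                     nr_sects : max_discard_sectors;
                uint64_t end_sect = sector + req_sects;

                /* Same rule as the patch: if this request is a split and the
                 * next one would start misaligned, stop at the previous
                 * aligned sector instead. */
                if (req_sects < nr_sects && (end_sect & mask) != alignment) {
                        end_sect = round_down_pow2(end_sect - alignment,
                                                   granularity) + alignment;
                        req_sects = end_sect - sector;
                }

                printf("discard sectors %llu..%llu (%llu sectors)\n",
                       (unsigned long long)sector,
                       (unsigned long long)(end_sect - 1),
                       (unsigned long long)req_sects);

                nr_sects -= req_sects;
                sector = end_sect;
        }
        return 0;
}

With these made-up numbers the first piece is trimmed from 64 to 63 sectors (11..73), so the second piece starts at sector 74, i.e. on an 8-sector boundary plus the 2-sector alignment, and covers the remaining 37 sectors.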
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 160035f54882..e76279e41162 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -110,6 +110,49 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
         return 0;
 }
 
+static void
+__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
+                     struct scatterlist *sglist, struct bio_vec **bvprv,
+                     struct scatterlist **sg, int *nsegs, int *cluster)
+{
+
+        int nbytes = bvec->bv_len;
+
+        if (*bvprv && *cluster) {
+                if ((*sg)->length + nbytes > queue_max_segment_size(q))
+                        goto new_segment;
+
+                if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
+                        goto new_segment;
+                if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
+                        goto new_segment;
+
+                (*sg)->length += nbytes;
+        } else {
+new_segment:
+                if (!*sg)
+                        *sg = sglist;
+                else {
+                        /*
+                         * If the driver previously mapped a shorter
+                         * list, we could see a termination bit
+                         * prematurely unless it fully inits the sg
+                         * table on each mapping. We KNOW that there
+                         * must be more entries here or the driver
+                         * would be buggy, so force clear the
+                         * termination bit to avoid doing a full
+                         * sg_init_table() in drivers for each command.
+                         */
+                        (*sg)->page_link &= ~0x02;
+                        *sg = sg_next(*sg);
+                }
+
+                sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
+                (*nsegs)++;
+        }
+        *bvprv = bvec;
+}
+
 /*
  * map a request to scatterlist, return number of sg entries setup. Caller
  * must make sure sg can hold rq->nr_phys_segments entries
@@ -131,41 +174,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
         bvprv = NULL;
         sg = NULL;
         rq_for_each_segment(bvec, rq, iter) {
-                int nbytes = bvec->bv_len;
-
-                if (bvprv && cluster) {
-                        if (sg->length + nbytes > queue_max_segment_size(q))
-                                goto new_segment;
-
-                        if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
-                                goto new_segment;
-                        if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
-                                goto new_segment;
-
-                        sg->length += nbytes;
-                } else {
-new_segment:
-                        if (!sg)
-                                sg = sglist;
-                        else {
-                                /*
-                                 * If the driver previously mapped a shorter
-                                 * list, we could see a termination bit
-                                 * prematurely unless it fully inits the sg
-                                 * table on each mapping. We KNOW that there
-                                 * must be more entries here or the driver
-                                 * would be buggy, so force clear the
-                                 * termination bit to avoid doing a full
-                                 * sg_init_table() in drivers for each command.
-                                 */
-                                sg->page_link &= ~0x02;
-                                sg = sg_next(sg);
-                        }
-
-                        sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
-                        nsegs++;
-                }
-                bvprv = bvec;
+                __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+                                     &nsegs, &cluster);
         } /* segments in rq */
 
 
@@ -199,6 +209,43 @@ new_segment:
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
 
+/**
+ * blk_bio_map_sg - map a bio to a scatterlist
+ * @q: request_queue in question
+ * @bio: bio being mapped
+ * @sglist: scatterlist being mapped
+ *
+ * Note:
+ *    Caller must make sure sg can hold bio->bi_phys_segments entries
+ *
+ * Will return the number of sg entries setup
+ */
+int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
+                   struct scatterlist *sglist)
+{
+        struct bio_vec *bvec, *bvprv;
+        struct scatterlist *sg;
+        int nsegs, cluster;
+        unsigned long i;
+
+        nsegs = 0;
+        cluster = blk_queue_cluster(q);
+
+        bvprv = NULL;
+        sg = NULL;
+        bio_for_each_segment(bvec, bio, i) {
+                __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+                                     &nsegs, &cluster);
+        } /* segments in bio */
+
+        if (sg)
+                sg_mark_end(sg);
+
+        BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
+        return nsegs;
+}
+EXPORT_SYMBOL(blk_bio_map_sg);
+
 static inline int ll_new_hw_segment(struct request_queue *q,
                                     struct request *req,
                                     struct bio *bio)
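
Note: blk_bio_map_sg() gives bio-based drivers the same scatterlist construction that blk_rq_map_sg() performs for requests, sharing the clustering and segment-boundary rules through __blk_segment_map_sg(). The sketch below shows how a driver might call it; the driver structure, MY_MAX_SEGS limit, and function name are invented for illustration, and it assumes the blk_bio_map_sg() prototype is exported in blkdev.h by the full patch.

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#define MY_MAX_SEGS     128     /* assumed hardware segment limit */

struct my_dev {
        struct request_queue *q;
        struct scatterlist sgl[MY_MAX_SEGS];
};

/* Build a scatterlist straight from a bio, bypassing struct request. */
static int my_dev_map_bio(struct my_dev *dev, struct bio *bio)
{
        int nsegs;

        /* Caller must guarantee the table can hold bio->bi_phys_segments
         * entries; blk_bio_map_sg() marks the last used entry itself. */
        sg_init_table(dev->sgl, MY_MAX_SEGS);
        nsegs = blk_bio_map_sg(dev->q, bio, dev->sgl);

        /* ... program dev->sgl[0..nsegs-1] into the device here ... */
        return nsegs;
}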
diff --git a/block/genhd.c b/block/genhd.c
index cac7366957c3..d839723303c8 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -835,7 +835,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v)
 
 static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
 {
-        static void *p;
+        void *p;
 
         p = disk_seqf_start(seqf, pos);
         if (!IS_ERR_OR_NULL(p) && !*pos)
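
Note: the genhd.c hunk turns a function-scope static into an ordinary automatic variable. A static local is a single storage slot shared by every caller, so two concurrent readers of /proc/partitions could in principle overwrite each other's pointer between the assignment and the use. The user-space program below is only an analogy of that bug class, not kernel code; all names in it are invented.

/* A static local is one shared slot; a concurrent caller can change it
 * between the store and the use. */
#include <pthread.h>
#include <stdio.h>

static int table[2] = { 100, 200 };

static int *pick_broken(int idx)
{
        static int *p;          /* one slot shared by all threads - racy */

        p = &table[idx];
        /* another thread may reassign p right here */
        return p;
}

static int *pick_fixed(int idx)
{
        int *p;                 /* per-call storage, like the patched code */

        p = &table[idx];
        return p;
}

static void *worker(void *arg)
{
        int idx = (int)(long)arg;

        for (int i = 0; i < 1000000; i++) {
                if (*pick_broken(idx) != table[idx])
                        printf("thread %d saw the other thread's pointer\n", idx);
                if (*pick_fixed(idx) != table[idx])
                        printf("never happens\n");
        }
        return NULL;
}

int main(void)
{
        pthread_t t[2];

        pthread_create(&t[0], NULL, worker, (void *)0L);
        pthread_create(&t[1], NULL, worker, (void *)1L);
        pthread_join(t[0], NULL);
        pthread_join(t[1], NULL);
        return 0;
}

Build with -pthread. Whether the race is actually observed depends on timing and compiler optimisation; the point is only that the shared static slot is the hazard the genhd.c change removes.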