author     Mark Brown <broonie@opensource.wolfsonmicro.com>  2012-09-05 08:05:11 -0400
committer  Mark Brown <broonie@opensource.wolfsonmicro.com>  2012-09-05 08:05:11 -0400
commit     75d8f2931a803b803cb4a850448460475c20f30b (patch)
tree       9853b9084fa55609c8e4abbc1763bc500e05da50 /block/blk-merge.c
parent     ffb690d5aa36d38d7bed7579e3f07b84ff6b3a08 (diff)
parent     e93c7d1bc350189511d32cec2f0af79c30e7fa47 (diff)
Merge branch 'asoc-omap' into for-3.7
Diffstat (limited to 'block/blk-merge.c')
-rw-r--r--	block/blk-merge.c	117
1 file changed, 82 insertions(+), 35 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 160035f54882..e76279e41162 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -110,6 +110,49 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 	return 0;
 }
 
+static void
+__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
+		     struct scatterlist *sglist, struct bio_vec **bvprv,
+		     struct scatterlist **sg, int *nsegs, int *cluster)
+{
+
+	int nbytes = bvec->bv_len;
+
+	if (*bvprv && *cluster) {
+		if ((*sg)->length + nbytes > queue_max_segment_size(q))
+			goto new_segment;
+
+		if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
+			goto new_segment;
+		if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
+			goto new_segment;
+
+		(*sg)->length += nbytes;
+	} else {
+new_segment:
+		if (!*sg)
+			*sg = sglist;
+		else {
+			/*
+			 * If the driver previously mapped a shorter
+			 * list, we could see a termination bit
+			 * prematurely unless it fully inits the sg
+			 * table on each mapping. We KNOW that there
+			 * must be more entries here or the driver
+			 * would be buggy, so force clear the
+			 * termination bit to avoid doing a full
+			 * sg_init_table() in drivers for each command.
+			 */
+			(*sg)->page_link &= ~0x02;
+			*sg = sg_next(*sg);
+		}
+
+		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
+		(*nsegs)++;
+	}
+	*bvprv = bvec;
+}
+
 /*
  * map a request to scatterlist, return number of sg entries setup. Caller
  * must make sure sg can hold rq->nr_phys_segments entries
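The in-code comment above is the subtle part of the new helper: bit 1 of page_link is the scatterlist termination marker, set by sg_mark_end() and tested by sg_next() via sg_is_last(). A minimal sketch of why a reused entry needs that bit cleared; the function name here is made up for illustration and is not part of the patch:

#include <linux/scatterlist.h>

/*
 * Sketch only: a table terminated at this entry by a previous, shorter
 * mapping still carries a stale end marker, so sg_next() would report
 * the list as finished.  Clearing the bit lets the walk continue into
 * the reused tail without a full sg_init_table() per command.
 */
static struct scatterlist *advance_past_stale_end(struct scatterlist *sg)
{
	sg->page_link &= ~0x02;		/* clear stale termination bit */
	return sg_next(sg);		/* next entry is reachable again */
}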
@@ -131,41 +174,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	bvprv = NULL;
 	sg = NULL;
 	rq_for_each_segment(bvec, rq, iter) {
-		int nbytes = bvec->bv_len;
-
-		if (bvprv && cluster) {
-			if (sg->length + nbytes > queue_max_segment_size(q))
-				goto new_segment;
-
-			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
-				goto new_segment;
-			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
-				goto new_segment;
-
-			sg->length += nbytes;
-		} else {
-new_segment:
-			if (!sg)
-				sg = sglist;
-			else {
-				/*
-				 * If the driver previously mapped a shorter
-				 * list, we could see a termination bit
-				 * prematurely unless it fully inits the sg
-				 * table on each mapping. We KNOW that there
-				 * must be more entries here or the driver
-				 * would be buggy, so force clear the
-				 * termination bit to avoid doing a full
-				 * sg_init_table() in drivers for each command.
-				 */
-				sg->page_link &= ~0x02;
-				sg = sg_next(sg);
-			}
-
-			sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
-			nsegs++;
-		}
-		bvprv = bvec;
+		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+				     &nsegs, &cluster);
 	} /* segments in rq */
 
 
@@ -199,6 +209,43 @@ new_segment:
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
 
+/**
+ * blk_bio_map_sg - map a bio to a scatterlist
+ * @q: request_queue in question
+ * @bio: bio being mapped
+ * @sglist: scatterlist being mapped
+ *
+ * Note:
+ *    Caller must make sure sg can hold bio->bi_phys_segments entries
+ *
+ * Will return the number of sg entries setup
+ */
+int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
+		   struct scatterlist *sglist)
+{
+	struct bio_vec *bvec, *bvprv;
+	struct scatterlist *sg;
+	int nsegs, cluster;
+	unsigned long i;
+
+	nsegs = 0;
+	cluster = blk_queue_cluster(q);
+
+	bvprv = NULL;
+	sg = NULL;
+	bio_for_each_segment(bvec, bio, i) {
+		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+				     &nsegs, &cluster);
+	} /* segments in bio */
+
+	if (sg)
+		sg_mark_end(sg);
+
+	BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
+	return nsegs;
+}
+EXPORT_SYMBOL(blk_bio_map_sg);
+
 static inline int ll_new_hw_segment(struct request_queue *q,
 				    struct request *req,
 				    struct bio *bio)
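For a sense of how the newly exported blk_bio_map_sg() is meant to be consumed: it fills a caller-supplied scatterlist from a single bio and terminates the result with sg_mark_end(). A minimal usage sketch, assuming a hypothetical driver with a preallocated table; struct my_dev, my_dev_map_bio() and MY_MAX_SEGS are illustrative names, not from this commit:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

#define MY_MAX_SEGS	128		/* assumed hardware segment limit */

/* Hypothetical driver state; only blk_bio_map_sg() comes from this patch. */
struct my_dev {
	struct request_queue *q;
	struct scatterlist sg[MY_MAX_SEGS];
};

static int my_dev_map_bio(struct my_dev *dev, struct bio *bio)
{
	int nsegs;

	/* Caller contract from the kernel-doc above: the table must be
	 * able to hold bio->bi_phys_segments entries. */
	if (bio->bi_phys_segments > MY_MAX_SEGS)
		return -EINVAL;

	nsegs = blk_bio_map_sg(dev->q, bio, dev->sg);

	/* dev->sg[0..nsegs-1] now describes the bio's pages and is
	 * already terminated via sg_mark_end(); hand it to DMA here. */
	return nsegs;
}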