path: root/mm/bounce.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2013-05-08 13:13:35 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-05-08 13:13:35 -0400
commit    4de13d7aa8f4d02f4dc99d4609575659f92b3c5a (patch)
tree      3bc9729eabe79c6164cd29a5d605000bc82bf837 /mm/bounce.c
parent    5af43c24ca59a448c9312dd4a4a51d27ec3b9a73 (diff)
parent    b8d4a5bf6a049303a29a3275f463f09a490b50ea (diff)
Merge branch 'for-3.10/core' of git://git.kernel.dk/linux-block
Pull block core updates from Jens Axboe:

 - Major bit is Kent's prep work for immutable bio vecs.

 - Stable candidate fix for a scheduling-while-atomic in the queue
   bypass operation.

 - Fix for the hang on exceeded rq->datalen 32-bit unsigned when
   merging discard bios.

 - Tejun's changes to convert the writeback thread pool to the generic
   workqueue mechanism.

 - Runtime PM framework; SCSI patches exist on top of these in James'
   tree.

 - A few random fixes.

* 'for-3.10/core' of git://git.kernel.dk/linux-block: (40 commits)
  relay: move remove_buf_file inside relay_close_buf
  partitions/efi.c: replace useless kzalloc's by kmalloc's
  fs/block_dev.c: fix iov_shorten() criteria in blkdev_aio_read()
  block: fix max discard sectors limit
  blkcg: fix "scheduling while atomic" in blk_queue_bypass_start
  Documentation: cfq-iosched: update documentation help for cfq tunables
  writeback: expose the bdi_wq workqueue
  writeback: replace custom worker pool implementation with unbound workqueue
  writeback: remove unused bdi_pending_list
  aoe: Fix unitialized var usage
  bio-integrity: Add explicit field for owner of bip_buf
  block: Add an explicit bio flag for bios that own their bvec
  block: Add bio_alloc_pages()
  block: Convert some code to bio_for_each_segment_all()
  block: Add bio_for_each_segment_all()
  bounce: Refactor __blk_queue_bounce to not use bi_io_vec
  raid1: use bio_copy_data()
  pktcdvd: Use bio_reset() in disabled code to kill bi_idx usage
  pktcdvd: use bio_copy_data()
  block: Add bio_copy_data()
  ...
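Two of the helpers named in the shortlog, bio_alloc_pages() and bio_copy_data(), replace open-coded bvec loops in raid1 and pktcdvd. A minimal sketch of how they can combine to snapshot a bio's payload, assuming the 3.10-era block APIs above; the wrapper make_bounce_copy() is hypothetical and error handling is kept minimal:

	#include <linux/bio.h>

	/*
	 * Sketch only, not part of this series: clone a bio, give the
	 * clone its own pages, then copy the payload across.
	 */
	static struct bio *make_bounce_copy(struct bio *src)
	{
		struct bio *dst = bio_clone_bioset(src, GFP_NOIO, fs_bio_set);

		if (!dst)
			return NULL;

		/* bio_alloc_pages() returns 0 on success */
		if (bio_alloc_pages(dst, GFP_NOIO)) {
			bio_put(dst);
			return NULL;
		}

		bio_copy_data(dst, src);	/* copy payload, segment by segment */
		return dst;
	}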
Diffstat (limited to 'mm/bounce.c')
 -rw-r--r--  mm/bounce.c | 75
 1 file changed, 20 insertions(+), 55 deletions(-)
diff --git a/mm/bounce.c b/mm/bounce.c
index a5c2ec3589cb..c9f0a4339a7d 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -101,7 +101,7 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
 	struct bio_vec *tovec, *fromvec;
 	int i;
 
-	__bio_for_each_segment(tovec, to, i, 0) {
+	bio_for_each_segment(tovec, to, i) {
 		fromvec = from->bi_io_vec + i;
 
 		/*
@@ -134,7 +134,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
 	/*
 	 * free up bounce indirect pages used
 	 */
-	__bio_for_each_segment(bvec, bio, i, 0) {
+	bio_for_each_segment_all(bvec, bio, i) {
 		org_vec = bio_orig->bi_io_vec + i;
 		if (bvec->bv_page == org_vec->bv_page)
 			continue;
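Both hunks above make the iteration intent explicit: the old __bio_for_each_segment(bv, bio, i, 0) open-coded a walk from index 0, while the series distinguishes bio_for_each_segment(), which covers only the not-yet-completed segments starting at bi_idx, from the new bio_for_each_segment_all(), which visits every bvec and may only be used by code that owns the bio, as bounce_end_io does here. A minimal sketch of the owner-only variant; free_all_pages() is hypothetical:

	#include <linux/bio.h>
	#include <linux/mm.h>

	/* Owner-only walk: touches every bvec, including any the
	 * block layer has already completed past bi_idx. */
	static void free_all_pages(struct bio *bio)
	{
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i)
			__free_page(bvec->bv_page);
	}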
@@ -199,78 +199,43 @@ static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
 static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 			       mempool_t *pool, int force)
 {
-	struct page *page;
-	struct bio *bio = NULL;
-	int i, rw = bio_data_dir(*bio_orig);
+	struct bio *bio;
+	int rw = bio_data_dir(*bio_orig);
 	struct bio_vec *to, *from;
+	unsigned i;
 
-	bio_for_each_segment(from, *bio_orig, i) {
-		page = from->bv_page;
+	bio_for_each_segment(from, *bio_orig, i)
+		if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
+			goto bounce;
 
-		/*
-		 * is destination page below bounce pfn?
-		 */
-		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
-			continue;
-
-		/*
-		 * irk, bounce it
-		 */
-		if (!bio) {
-			unsigned int cnt = (*bio_orig)->bi_vcnt;
+	return;
+bounce:
+	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);
 
-			bio = bio_alloc(GFP_NOIO, cnt);
-			memset(bio->bi_io_vec, 0, cnt * sizeof(struct bio_vec));
-		}
-
+	bio_for_each_segment_all(to, bio, i) {
+		struct page *page = to->bv_page;
 
-		to = bio->bi_io_vec + i;
+		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
+			continue;
 
-		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
-		to->bv_len = from->bv_len;
-		to->bv_offset = from->bv_offset;
 		inc_zone_page_state(to->bv_page, NR_BOUNCE);
+		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
 
 		if (rw == WRITE) {
 			char *vto, *vfrom;
 
-			flush_dcache_page(from->bv_page);
+			flush_dcache_page(page);
+
 			vto = page_address(to->bv_page) + to->bv_offset;
-			vfrom = kmap(from->bv_page) + from->bv_offset;
+			vfrom = kmap_atomic(page) + to->bv_offset;
 			memcpy(vto, vfrom, to->bv_len);
-			kunmap(from->bv_page);
+			kunmap_atomic(vfrom);
 		}
 	}
 
-	/*
-	 * no pages bounced
-	 */
-	if (!bio)
-		return;
-
 	trace_block_bio_bounce(q, *bio_orig);
 
-	/*
-	 * at least one page was bounced, fill in possible non-highmem
-	 * pages
-	 */
-	__bio_for_each_segment(from, *bio_orig, i, 0) {
-		to = bio_iovec_idx(bio, i);
-		if (!to->bv_page) {
-			to->bv_page = from->bv_page;
-			to->bv_len = from->bv_len;
-			to->bv_offset = from->bv_offset;
-		}
-	}
-
-	bio->bi_bdev = (*bio_orig)->bi_bdev;
 	bio->bi_flags |= (1 << BIO_BOUNCED);
-	bio->bi_sector = (*bio_orig)->bi_sector;
-	bio->bi_rw = (*bio_orig)->bi_rw;
-
-	bio->bi_vcnt = (*bio_orig)->bi_vcnt;
-	bio->bi_idx = (*bio_orig)->bi_idx;
-	bio->bi_size = (*bio_orig)->bi_size;
 
 	if (pool == page_pool) {
 		bio->bi_end_io = bounce_end_io_write;
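The listing is truncated here, but the shape of the refactor is already visible: scan for any page above the bounce pfn, return early if there is none, otherwise clone the whole bio with bio_clone_bioset() and substitute bounce pages in place, which removes the hand-maintained bi_io_vec/bi_idx/bi_size bookkeeping of the old version. The completion side pairs with it roughly as follows; a simplified sketch modeled on the bounce_end_io() hunk further up (the real function also decrements the NR_BOUNCE counter and, on reads, copies data back to the original pages):

	#include <linux/bio.h>
	#include <linux/mempool.h>

	/* Sketch: completion pairing for the clone-based bounce.
	 * bi_private carries the original bio; any bvec whose page
	 * differs from the original's was substituted and is freed
	 * back to the bounce pool. */
	static void bounce_end_io_sketch(struct bio *bio, mempool_t *pool, int err)
	{
		struct bio *bio_orig = bio->bi_private;
		struct bio_vec *bvec, *org_vec;
		int i;

		bio_for_each_segment_all(bvec, bio, i) {
			org_vec = bio_orig->bi_io_vec + i;
			if (bvec->bv_page == org_vec->bv_page)
				continue;	/* below the pfn limit, never bounced */
			mempool_free(bvec->bv_page, pool);
		}

		bio_endio(bio_orig, err);
		bio_put(bio);
	}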