author	Jan Kara <jack@suse.cz>	2015-06-18 11:19:14 -0400
committer	Jan Kara <jack@suse.com>	2015-07-23 14:59:40 -0400
commit	a3ad0a9da863fa554fc17fa8345a07adcdd27d3c (patch)
tree	a37306c7997b48d3613391f93e77df59e6233b8c
parent	c290ea01abb7907fde602f3ba55905ef10a37477 (diff)
block: Remove forced page bouncing under IO
The JBD layer wrote back data buffers without setting the PageWriteback bit.
Thus the standard mechanism for guaranteeing stable pages under IO did not
work. Since JBD is gone now and there is no other user of the functionality,
just remove it.

Acked-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Jan Kara <jack@suse.cz>
-rw-r--r--	block/bounce.c	31
-rw-r--r--	include/linux/blk_types.h	5
2 files changed, 6 insertions(+), 30 deletions(-)
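For context, the stable-pages mechanism the commit message refers to works
roughly as sketched below: the writeback path sets PageWriteback, and a
writer about to re-dirty the page waits on that bit when the backing device
requires stable pages. This is a minimal illustrative sketch, not code from
this patch; the example_* functions are hypothetical, while
set_page_writeback() and wait_for_stable_page() are the real mm helpers.

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Illustrative only: how stable pages are normally guaranteed. JBD
 * submitted its data buffers without ever setting PageWriteback, so
 * writers never blocked here and the forced bounce removed below was
 * the only way to keep the pages stable during IO. */
static void example_start_writeback(struct page *page)
{
	set_page_writeback(page);	/* IO completion calls end_page_writeback() */
}

static void example_redirty(struct page *page)
{
	/* On a bdi that requires stable pages this blocks until the
	 * in-flight writeback of @page has completed. */
	wait_for_stable_page(page);
	/* ... safe to modify the page contents now ... */
}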
diff --git a/block/bounce.c b/block/bounce.c
index b17311227c12..31cad13a0c9d 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -176,26 +176,8 @@ static void bounce_end_io_read_isa(struct bio *bio, int err)
 	__bounce_end_io_read(bio, isa_page_pool, err);
 }
 
-#ifdef CONFIG_NEED_BOUNCE_POOL
-static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
-{
-	if (bio_data_dir(bio) != WRITE)
-		return 0;
-
-	if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
-		return 0;
-
-	return test_bit(BIO_SNAP_STABLE, &bio->bi_flags);
-}
-#else
-static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
-{
-	return 0;
-}
-#endif /* CONFIG_NEED_BOUNCE_POOL */
-
 static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
-			       mempool_t *pool, int force)
+			       mempool_t *pool)
 {
 	struct bio *bio;
 	int rw = bio_data_dir(*bio_orig);
@@ -203,8 +185,6 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	struct bvec_iter iter;
 	unsigned i;
 
-	if (force)
-		goto bounce;
 	bio_for_each_segment(from, *bio_orig, iter)
 		if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
 			goto bounce;
@@ -216,7 +196,7 @@ bounce:
 	bio_for_each_segment_all(to, bio, i) {
 		struct page *page = to->bv_page;
 
-		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
+		if (page_to_pfn(page) <= queue_bounce_pfn(q))
 			continue;
 
 		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
@@ -254,7 +234,6 @@ bounce:
 
 void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 {
-	int must_bounce;
 	mempool_t *pool;
 
 	/*
@@ -263,15 +242,13 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	if (!bio_has_data(*bio_orig))
 		return;
 
-	must_bounce = must_snapshot_stable_pages(q, *bio_orig);
-
 	/*
 	 * for non-isa bounce case, just check if the bounce pfn is equal
 	 * to or bigger than the highest pfn in the system -- in that case,
 	 * don't waste time iterating over bio segments
 	 */
 	if (!(q->bounce_gfp & GFP_DMA)) {
-		if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce)
+		if (queue_bounce_pfn(q) >= blk_max_pfn)
 			return;
 		pool = page_pool;
 	} else {
@@ -282,7 +259,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	/*
 	 * slow path
 	 */
-	__blk_queue_bounce(q, bio_orig, pool, must_bounce);
+	__blk_queue_bounce(q, bio_orig, pool);
 }
 
 EXPORT_SYMBOL(blk_queue_bounce);
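What survives in __blk_queue_bounce() is the addressing-limit case only:
segments above queue_bounce_pfn(q) still get copied into lowmem pages. A
hedged sketch of what bouncing one write segment amounts to is below; the
names mirror block/bounce.c of this era, but the helper itself is
hypothetical and illustrative only. Reads go the other way at completion
time, when __bounce_end_io_read() copies the bounce page back.

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: replace a high page with a DMA-able one and, for
 * a WRITE, copy the payload so the device never touches the original. */
static void example_bounce_write_segment(struct bio_vec *to, mempool_t *pool,
					 gfp_t bounce_gfp)
{
	struct page *orig = to->bv_page;
	char *vto, *vfrom;

	/* mempool allocation with a reserve does not fail */
	to->bv_page = mempool_alloc(pool, bounce_gfp);

	vto = page_address(to->bv_page) + to->bv_offset;
	vfrom = kmap_atomic(orig) + to->bv_offset;
	memcpy(vto, vfrom, to->bv_len);
	kunmap_atomic(vfrom);
}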
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 7303b3405520..89fd49184b48 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -118,9 +118,8 @@ struct bio {
 #define BIO_USER_MAPPED 4	/* contains user pages */
 #define BIO_NULL_MAPPED 5	/* contains invalid user pages */
 #define BIO_QUIET	6	/* Make BIO Quiet */
-#define BIO_SNAP_STABLE	7	/* bio data must be snapshotted during write */
-#define BIO_CHAIN	8	/* chained bio, ->bi_remaining in effect */
-#define BIO_REFFED	9	/* bio has elevated ->bi_cnt */
+#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
+#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
 
 /*
  * Flags starting here get preserved by bio_reset() - this includes
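With BIO_SNAP_STABLE gone, the later flags shift down by one. These values
are bit indices into bio->bi_flags (an unsigned long at this point in the
tree), consumed with the usual bitops; the just-deleted
must_snapshot_stable_pages() tested its flag exactly this way. The helper
name below is hypothetical:

#include <linux/bio.h>

/* Illustrative only: equivalent open-coded test_bit() calls on
 * ->bi_flags appear throughout block/. */
static inline bool example_bio_flagged(struct bio *bio, unsigned int bit)
{
	return test_bit(bit, &bio->bi_flags);
}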