Diffstat (limited to 'mm/bounce.c')
-rw-r--r--  mm/bounce.c  21
1 file changed, 1 insertion(+), 20 deletions(-)
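For orientation, here is a sketch of must_snapshot_stable_pages() as it reads after this patch, reconstructed from the hunk below; the surrounding code and the CONFIG_NEED_BOUNCE_POOL=n stub (elided here) are assumed unchanged, and the comments are added for illustration only.

#ifdef CONFIG_NEED_BOUNCE_POOL
static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
{
	/* Only writes can dirty pages while they are under I/O, so reads never need a snapshot. */
	if (bio_data_dir(bio) != WRITE)
		return 0;

	/* The backing device does not require stable page writes at all. */
	if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
		return 0;

	/* Per-bio flag, presumably set by the code that submitted the bio when a stable copy is needed. */
	return test_bit(BIO_SNAP_STABLE, &bio->bi_flags);
}
#endif /* CONFIG_NEED_BOUNCE_POOL */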
diff --git a/mm/bounce.c b/mm/bounce.c
index 5f8901768602..a5c2ec3589cb 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -181,32 +181,13 @@ static void bounce_end_io_read_isa(struct bio *bio, int err)
 #ifdef CONFIG_NEED_BOUNCE_POOL
 static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
 {
-	struct page *page;
-	struct backing_dev_info *bdi;
-	struct address_space *mapping;
-	struct bio_vec *from;
-	int i;
-
 	if (bio_data_dir(bio) != WRITE)
 		return 0;
 
 	if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
 		return 0;
 
-	/*
-	 * Based on the first page that has a valid mapping, decide whether or
-	 * not we have to employ bounce buffering to guarantee stable pages.
-	 */
-	bio_for_each_segment(from, bio, i) {
-		page = from->bv_page;
-		mapping = page_mapping(page);
-		if (!mapping)
-			continue;
-		bdi = mapping->backing_dev_info;
-		return mapping->host->i_sb->s_flags & MS_SNAP_STABLE;
-	}
-
-	return 0;
+	return test_bit(BIO_SNAP_STABLE, &bio->bi_flags);
 }
 #else
 static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)