Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig           13
-rw-r--r--  mm/backing-dev.c     11
-rw-r--r--  mm/bounce.c          48
-rw-r--r--  mm/filemap.c          3
-rw-r--r--  mm/page-writeback.c  24
5 files changed, 94 insertions, 5 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 308fdcaeed77..0b23db9a8791 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -258,6 +258,19 @@ config BOUNCE
 	def_bool y
 	depends on BLOCK && MMU && (ZONE_DMA || HIGHMEM)
 
+# On the 'tile' arch, USB OHCI needs the bounce pool since tilegx will often
+# have more than 4GB of memory, but we don't currently use the IOTLB to present
+# a 32-bit address to OHCI. So we need to use a bounce pool instead.
+#
+# We also use the bounce pool to provide stable page writes for jbd. jbd
+# initiates buffer writeback without locking the page or setting PG_writeback,
+# and fixing that behavior (a second time; jbd2 doesn't have this problem) is
+# a major rework effort. Instead, use the bounce buffer to snapshot pages
+# (until jbd goes away). The only jbd user is ext3.
+config NEED_BOUNCE_POOL
+	bool
+	default y if (TILE && USB_OHCI_HCD) || (BLK_DEV_INTEGRITY && JBD)
+
 config NR_QUICK
 	int
 	depends on QUICKLIST
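For the jbd/ext3 case described in the help text above, the filesystem is
expected to opt in to snapshot-style stable pages by flagging its superblock
at mount time. A minimal sketch of that opt-in (the exact ext3 call site is
an assumption here):

	static int example_fill_super(struct super_block *sb, void *data, int silent)
	{
		/* Hypothetical mount-time opt-in: ask the block layer to
		 * snapshot pages via the bounce pool rather than forcing
		 * writers to wait on PG_writeback. */
		sb->s_flags |= MS_SNAP_STABLE;
		return 0;
	}

must_snapshot_stable_pages() in mm/bounce.c below tests this flag to decide
whether a WRITE bio must be bounced.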
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index d3ca2b3ee176..41733c5dc820 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -221,12 +221,23 @@ static ssize_t max_ratio_store(struct device *dev,
 }
 BDI_SHOW(max_ratio, bdi->max_ratio)
 
+static ssize_t stable_pages_required_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *page)
+{
+	struct backing_dev_info *bdi = dev_get_drvdata(dev);
+
+	return snprintf(page, PAGE_SIZE-1, "%d\n",
+			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
+}
+
 #define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)
 
 static struct device_attribute bdi_dev_attrs[] = {
 	__ATTR_RW(read_ahead_kb),
 	__ATTR_RW(min_ratio),
 	__ATTR_RW(max_ratio),
+	__ATTR_RO(stable_pages_required),
 	__ATTR_NULL,
 };
 
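The new attribute surfaces as /sys/class/bdi/<bdi>/stable_pages_required and
reads back 1 when the backing device demands stable pages. The capability bit
that bdi_cap_stable_pages_required() tests is set by the device driver; a
hedged sketch, assuming the BDI_CAP_STABLE_WRITES flag from the companion
backing-dev.h change:

	/* In a block driver's setup path, e.g. one that computes integrity
	 * checksums over the payload and so cannot tolerate pages changing
	 * while a request is in flight: */
	q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;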
diff --git a/mm/bounce.c b/mm/bounce.c
index 042086775561..5f8901768602 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -178,8 +178,45 @@ static void bounce_end_io_read_isa(struct bio *bio, int err)
 	__bounce_end_io_read(bio, isa_page_pool, err);
 }
 
+#ifdef CONFIG_NEED_BOUNCE_POOL
+static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
+{
+	struct page *page;
+	struct backing_dev_info *bdi;
+	struct address_space *mapping;
+	struct bio_vec *from;
+	int i;
+
+	if (bio_data_dir(bio) != WRITE)
+		return 0;
+
+	if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
+		return 0;
+
+	/*
+	 * Based on the first page that has a valid mapping, decide whether or
+	 * not we have to employ bounce buffering to guarantee stable pages.
+	 */
+	bio_for_each_segment(from, bio, i) {
+		page = from->bv_page;
+		mapping = page_mapping(page);
+		if (!mapping)
+			continue;
+		bdi = mapping->backing_dev_info;
+		return mapping->host->i_sb->s_flags & MS_SNAP_STABLE;
+	}
+
+	return 0;
+}
+#else
+static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
+{
+	return 0;
+}
+#endif /* CONFIG_NEED_BOUNCE_POOL */
+
 static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
-			       mempool_t *pool)
+			       mempool_t *pool, int force)
 {
 	struct page *page;
 	struct bio *bio = NULL;
@@ -192,7 +229,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	/*
 	 * is destination page below bounce pfn?
 	 */
-	if (page_to_pfn(page) <= queue_bounce_pfn(q))
+	if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
 		continue;
 
@@ -270,6 +307,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 
 void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 {
+	int must_bounce;
 	mempool_t *pool;
 
 	/*
@@ -278,13 +316,15 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	if (!bio_has_data(*bio_orig))
 		return;
 
+	must_bounce = must_snapshot_stable_pages(q, *bio_orig);
+
 	/*
 	 * for non-isa bounce case, just check if the bounce pfn is equal
 	 * to or bigger than the highest pfn in the system -- in that case,
 	 * don't waste time iterating over bio segments
 	 */
 	if (!(q->bounce_gfp & GFP_DMA)) {
-		if (queue_bounce_pfn(q) >= blk_max_pfn)
+		if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce)
 			return;
 		pool = page_pool;
 	} else {
@@ -295,7 +335,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	/*
 	 * slow path
 	 */
-	__blk_queue_bounce(q, bio_orig, pool);
+	__blk_queue_bounce(q, bio_orig, pool, must_bounce);
 }
 
 EXPORT_SYMBOL(blk_queue_bounce);
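When must_snapshot_stable_pages() returns true, __blk_queue_bounce() is forced
to bounce every segment, which effectively snapshots the payload: the device
DMAs from a private copy, so the original page may be redirtied immediately.
Conceptually, per segment (a sketch of the effect, not the exact code inside
__blk_queue_bounce()):

	struct page *copy = mempool_alloc(pool, GFP_NOIO);	/* gfp assumed */
	copy_highpage(copy, from->bv_page);	/* stable snapshot of the data */
	/* The cloned bio's segment then points at the copy, and the
	 * bounce_end_io path returns the copy to the mempool on completion. */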
diff --git a/mm/filemap.c b/mm/filemap.c
index 24a7ea583f0c..c610076c30e1 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1728,6 +1728,7 @@ int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * see the dirty page and writeprotect it again.
 	 */
 	set_page_dirty(page);
+	wait_for_stable_page(page);
 out:
 	sb_end_pagefault(inode->i_sb);
 	return ret;
@@ -2274,7 +2275,7 @@ repeat:
 		return NULL;
 	}
 found:
-	wait_on_page_writeback(page);
+	wait_for_stable_page(page);
 	return page;
 }
 EXPORT_SYMBOL(grab_cache_page_write_begin);
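filemap_page_mkwrite() gains a wait_for_stable_page() after dirtying the page,
and grab_cache_page_write_begin() trades its unconditional
wait_on_page_writeback() for the new helper, so writers stall only when the
backing device actually requires stable pages. A hypothetical filesystem
->page_mkwrite would follow the same pattern:

	static int example_page_mkwrite(struct vm_area_struct *vma,
					struct vm_fault *vmf)
	{
		struct page *page = vmf->page;

		lock_page(page);
		/* ... verify page->mapping and i_size here ... */
		set_page_dirty(page);
		/* no-op unless the bdi requires stable pages */
		wait_for_stable_page(page);
		return VM_FAULT_LOCKED;
	}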
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 66a0024becd9..7300c9d5e1d9 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2290,3 +2290,27 @@ int mapping_tagged(struct address_space *mapping, int tag)
 	return radix_tree_tagged(&mapping->page_tree, tag);
 }
 EXPORT_SYMBOL(mapping_tagged);
+
+/**
+ * wait_for_stable_page() - wait for writeback to finish, if necessary.
+ * @page: The page to wait on.
+ *
+ * This function determines if the given page is related to a backing device
+ * that requires page contents to be held stable during writeback. If so, then
+ * it will wait for any pending writeback to complete.
+ */
+void wait_for_stable_page(struct page *page)
+{
+	struct address_space *mapping = page_mapping(page);
+	struct backing_dev_info *bdi = mapping->backing_dev_info;
+
+	if (!bdi_cap_stable_pages_required(bdi))
+		return;
+#ifdef CONFIG_NEED_BOUNCE_POOL
+	if (mapping->host->i_sb->s_flags & MS_SNAP_STABLE)
+		return;
+#endif /* CONFIG_NEED_BOUNCE_POOL */
+
+	wait_on_page_writeback(page);
+}
+EXPORT_SYMBOL_GPL(wait_for_stable_page);
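For reference, the bdi_cap_stable_pages_required() helper used throughout this
patch is presumably the capability test added to include/linux/backing-dev.h
alongside it; a sketch of the assumed shape:

	static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
	{
		return bdi->capabilities & BDI_CAP_STABLE_WRITES;
	}

Note the CONFIG_NEED_BOUNCE_POOL escape hatch in wait_for_stable_page() above:
when the filesystem has asked for MS_SNAP_STABLE snapshotting, the bounce pool
already guarantees stability, so the writer need not sleep on PG_writeback.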