Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	30
1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 87779dda4ec6..eca70310adb2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -19,6 +19,7 @@
 #include <linux/pagemap.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
+#include <linux/vmstat.h>
 #include <linux/file.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
@@ -370,7 +371,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
 			/* synchronous write or broken a_ops? */
 			ClearPageReclaim(page);
 		}
-
+		inc_zone_page_state(page, NR_VMSCAN_WRITE);
 		return PAGE_SUCCESS;
 	}
 
@@ -383,11 +384,30 @@ int remove_mapping(struct address_space *mapping, struct page *page)
 	BUG_ON(mapping != page_mapping(page));
 
 	write_lock_irq(&mapping->tree_lock);
-
 	/*
-	 * The non-racy check for busy page. It is critical to check
-	 * PageDirty _after_ making sure that the page is freeable and
-	 * not in use by anybody. (pagecache + us == 2)
+	 * The non racy check for a busy page.
+	 *
+	 * Must be careful with the order of the tests. When someone has
+	 * a ref to the page, it may be possible that they dirty it then
+	 * drop the reference. So if PageDirty is tested before page_count
+	 * here, then the following race may occur:
+	 *
+	 * get_user_pages(&page);
+	 * [user mapping goes away]
+	 * write_to(page);
+	 *				!PageDirty(page)	[good]
+	 * SetPageDirty(page);
+	 * put_page(page);
+	 *				!page_count(page)	[good, discard it]
+	 *
+	 * [oops, our write_to data is lost]
+	 *
+	 * Reversing the order of the tests ensures such a situation cannot
+	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
+	 * load is not satisfied before that of page->_count.
+	 *
+	 * Note that if SetPageDirty is always performed via set_page_dirty,
+	 * and thus under tree_lock, then this ordering is not required.
 	 */
 	if (unlikely(page_count(page) != 2))
 		goto cannot_free;
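
For readers following the reasoning in the new comment, the check order it
describes comes out roughly as below. This is a minimal sketch of the fast
path of remove_mapping() as it stands after this patch (assumed shape, not
the verbatim file contents); error handling and the actual freeing of the
page are elided.

	write_lock_irq(&mapping->tree_lock);

	/*
	 * Step 1: test the reference count first. With the pagecache
	 * holding one reference and the caller holding the other,
	 * page_count(page) == 2 means nobody else still holds a
	 * reference through which the page could be dirtied.
	 */
	if (unlikely(page_count(page) != 2))
		goto cannot_free;

	/*
	 * Step 2: smp_rmb() keeps the page->flags load from being
	 * satisfied before the page->_count load above, so a racing
	 * SetPageDirty(); put_page() sequence cannot be observed in
	 * the wrong order.
	 */
	smp_rmb();

	/* Step 3: only now is it safe to test dirtiness. */
	if (unlikely(PageDirty(page)))
		goto cannot_free;

Testing the count before the dirty bit means a racing dirtier is always
caught: either its reference is still visible (count != 2, so we bail), or
it has already dropped the reference, in which case its SetPageDirty is
visible by the time PageDirty is tested.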