author     Nick Piggin <npiggin@suse.de>                          2007-07-19 04:47:22 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-07-19 13:04:41 -0400
commit     79352894b28550ee0eee919149f57626ec1b3572 (patch)
tree       849e6aa148c69b9df3920199255ca14792eeffa2
parent     83c54070ee1a2d05c89793884bea1a03f2851ed4 (diff)
mm: fix clear_page_dirty_for_io vs fault race
Fix msync data loss and (less importantly) dirty page accounting
inaccuracies due to the race remaining in clear_page_dirty_for_io().
The deleted comment explains what the race was, and the added comments
explain how it is fixed.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Miklos Szeredi <miklos@szeredi.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  mm/memory.c          |  9
-rw-r--r--  mm/page-writeback.c  | 17
2 files changed, 21 insertions, 5 deletions
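To make the race and the fix concrete before reading the diff, here is a small userspace sketch of the two schedules the changelog describes. It is an illustration only, not kernel code: the struct page is reduced to two booleans, the page lock appears only in comments, and the names page_model, pre_patch and post_patch are invented here.

```c
/*
 * Userspace sketch of the race fixed by this commit (illustration only).
 * A struct page is reduced to two flags; each function replays one
 * possible schedule of two CPUs by hand, with comments marking which
 * side performs each step.
 */
#include <stdbool.h>
#include <stdio.h>

struct page_model {
	bool pte_dirty;		/* dirty bit in the racing pte    */
	bool page_dirty;	/* the page's "master dirty bit"  */
};

/* Pre-patch schedule: the fault's set_page_dirty() falls in the window
 * between page_mkclean() and TestClearPageDirty(). */
static bool pre_patch(void)
{
	struct page_model p = { .pte_dirty = false, .page_dirty = true };

	/* CPU0 (writeback, page locked): page_mkclean() finds no dirty pte */
	p.pte_dirty = true;	/* CPU1 (fault): installs a dirty pte        */
	p.page_dirty = true;	/* CPU1: set_page_dirty_balance() right away */
	p.page_dirty = false;	/* CPU0: TestClearPageDirty(), page written  */
	/* CPU1's application keeps writing through its dirty pte, but the
	 * page is clean, so msync() has nothing left to flush: data loss. */
	return p.page_dirty;
}

/* Post-patch schedule: the fault waits for the page lock before marking
 * the page dirty (wait_on_page_locked() + set_page_dirty_balance()). */
static bool post_patch(void)
{
	struct page_model p = { .pte_dirty = false, .page_dirty = true };

	/* CPU0 (writeback, page locked): page_mkclean() finds no dirty pte */
	p.pte_dirty = true;	/* CPU1: installs a dirty pte, then blocks in
				 * wait_on_page_locked()                     */
	p.page_dirty = false;	/* CPU0: TestClearPageDirty(), page written,
				 * page unlocked                             */
	p.page_dirty = true;	/* CPU1: wakes, set_page_dirty_balance()     */
	return p.page_dirty;	/* the next writeback pass sees the page     */
}

int main(void)
{
	printf("pre-patch : page still dirty after writeback = %d\n",
	       pre_patch());
	printf("post-patch: page still dirty after writeback = %d\n",
	       post_patch());
	return 0;
}
```

The only difference between the two functions is where the fault's set_page_dirty() lands: before the fix it can fall inside the window between page_mkclean() and TestClearPageDirty() and be wiped out; after the fix, wait_on_page_locked() pushes it past TestClearPageDirty(), so the page is re-dirtied and picked up by the next writeback pass.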
diff --git a/mm/memory.c b/mm/memory.c
index 61d51da7e17c..50dd3d1f4d18 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1765,6 +1765,15 @@ gotten:
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 	if (dirty_page) {
+		/*
+		 * Yes, Virginia, this is actually required to prevent a race
+		 * with clear_page_dirty_for_io() from clearing the page dirty
+		 * bit after it clear all dirty ptes, but before a racing
+		 * do_wp_page installs a dirty pte.
+		 *
+		 * do_no_page is protected similarly.
+		 */
+		wait_on_page_locked(dirty_page);
 		set_page_dirty_balance(dirty_page);
 		put_page(dirty_page);
 	}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 886ea0d5a136..e62482718012 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -918,6 +918,8 @@ int clear_page_dirty_for_io(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 
+	BUG_ON(!PageLocked(page));
+
 	if (mapping && mapping_cap_account_dirty(mapping)) {
 		/*
 		 * Yes, Virginia, this is indeed insane.
@@ -943,14 +945,19 @@ int clear_page_dirty_for_io(struct page *page)
 		 * We basically use the page "master dirty bit"
 		 * as a serialization point for all the different
 		 * threads doing their things.
-		 *
-		 * FIXME! We still have a race here: if somebody
-		 * adds the page back to the page tables in
-		 * between the "page_mkclean()" and the "TestClearPageDirty()",
-		 * we might have it mapped without the dirty bit set.
 		 */
 		if (page_mkclean(page))
 			set_page_dirty(page);
+		/*
+		 * We carefully synchronise fault handlers against
+		 * installing a dirty pte and marking the page dirty
+		 * at this point. We do this by having them hold the
+		 * page lock at some point after installing their
+		 * pte, but before marking the page dirty.
+		 * Pages are always locked coming in here, so we get
+		 * the desired exclusion. See mm/memory.c:do_wp_page()
+		 * for more comments.
+		 */
 		if (TestClearPageDirty(page)) {
 			dec_zone_page_state(page, NR_FILE_DIRTY);
 			return 1;
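A final note on the page-writeback.c side: the BUG_ON(!PageLocked(page)) added at the top of clear_page_dirty_for_io() turns the rule the new comment depends on ("Pages are always locked coming in here") into an enforced invariant, so any caller that reached this path without holding the page lock would trip it immediately instead of silently reopening the race.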