diff options
author | Nick Piggin <npiggin@suse.de> | 2007-07-19 04:47:22 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-19 13:04:41 -0400 |
commit | 79352894b28550ee0eee919149f57626ec1b3572 (patch) | |
tree | 849e6aa148c69b9df3920199255ca14792eeffa2 /mm/page-writeback.c | |
parent | 83c54070ee1a2d05c89793884bea1a03f2851ed4 (diff) |
mm: fix clear_page_dirty_for_io vs fault race
Fix msync data loss and (less importantly) dirty page accounting
inaccuracies due to the race remaining in clear_page_dirty_for_io().
The deleted comment explains what the race was, and the added comments
explain how it is fixed.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Miklos Szeredi <miklos@szeredi.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r-- | mm/page-writeback.c | 17 |
1 file changed, 12 insertions, 5 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 886ea0d5a136..e62482718012 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -918,6 +918,8 @@ int clear_page_dirty_for_io(struct page *page) | |||
918 | { | 918 | { |
919 | struct address_space *mapping = page_mapping(page); | 919 | struct address_space *mapping = page_mapping(page); |
920 | 920 | ||
921 | BUG_ON(!PageLocked(page)); | ||
922 | |||
921 | if (mapping && mapping_cap_account_dirty(mapping)) { | 923 | if (mapping && mapping_cap_account_dirty(mapping)) { |
922 | /* | 924 | /* |
923 | * Yes, Virginia, this is indeed insane. | 925 | * Yes, Virginia, this is indeed insane. |
@@ -943,14 +945,19 @@ int clear_page_dirty_for_io(struct page *page) | |||
943 | * We basically use the page "master dirty bit" | 945 | * We basically use the page "master dirty bit" |
944 | * as a serialization point for all the different | 946 | * as a serialization point for all the different |
945 | * threads doing their things. | 947 | * threads doing their things. |
946 | * | ||
947 | * FIXME! We still have a race here: if somebody | ||
948 | * adds the page back to the page tables in | ||
949 | * between the "page_mkclean()" and the "TestClearPageDirty()", | ||
950 | * we might have it mapped without the dirty bit set. | ||
951 | */ | 948 | */ |
952 | if (page_mkclean(page)) | 949 | if (page_mkclean(page)) |
953 | set_page_dirty(page); | 950 | set_page_dirty(page); |
951 | /* | ||
952 | * We carefully synchronise fault handlers against | ||
953 | * installing a dirty pte and marking the page dirty | ||
954 | * at this point. We do this by having them hold the | ||
955 | * page lock at some point after installing their | ||
956 | * pte, but before marking the page dirty. | ||
957 | * Pages are always locked coming in here, so we get | ||
958 | * the desired exclusion. See mm/memory.c:do_wp_page() | ||
959 | * for more comments. | ||
960 | */ | ||
954 | if (TestClearPageDirty(page)) { | 961 | if (TestClearPageDirty(page)) { |
955 | dec_zone_page_state(page, NR_FILE_DIRTY); | 962 | dec_zone_page_state(page, NR_FILE_DIRTY); |
956 | return 1; | 963 | return 1; |