author     Jan Kara <jack@suse.cz>              2012-06-12 10:20:28 -0400
committer  Al Viro <viro@zeniv.linux.org.uk>    2012-07-30 17:02:48 -0400
commit     41c4d25f78c01ede13efee1f2e979f3f35dd26f6
tree       30eeb0d8beea61b24bda3357997bd288f2bbbd3c /mm
parent     14ae417c6faf28b6e8ec60cc2aa0eaa19453a41c
mm: Update file times from fault path only if .page_mkwrite is not set
Filesystems wanting to properly support freezing need control over when
file_update_time() is called. After pushing file_update_time() into all
relevant .page_mkwrite implementations, we can simply stop calling
file_update_time() from the fault path when the filesystem implements
.page_mkwrite.
Tested-by: Kamal Mostafa <kamal@canonical.com>
Tested-by: Peter M. Petrakis <peter.petrakis@canonical.com>
Tested-by: Dann Frazier <dann.frazier@canonical.com>
Tested-by: Massimo Morana <massimo.morana@canonical.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
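For context, the series this commit belongs to pushed the file_update_time() call into each filesystem's .page_mkwrite handler. The following is only an illustrative sketch of what such a handler can look like, not code from this commit: the filesystem name myfs is made up, and it assumes the sb_start_pagefault()/sb_end_pagefault() freeze-protection helpers added in the same series; real implementations differ in detail.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Illustrative sketch only -- "myfs" is a made-up filesystem.  The point
 * is that the .page_mkwrite handler updates the file times itself, inside
 * the filesystem's freeze protection, so the generic fault path no longer
 * has to.
 */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_mapping->host;
	int ret = VM_FAULT_LOCKED;

	/* Block here while the filesystem is frozen. */
	sb_start_pagefault(inode->i_sb);

	/* Timestamp update is now under filesystem control. */
	file_update_time(vma->vm_file);

	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		/* Page was truncated or invalidated while we slept. */
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
	}

	sb_end_pagefault(inode->i_sb);
	return ret;	/* VM_FAULT_LOCKED: page is returned still locked */
}

With handlers of this shape owning the timestamp update, the hunks below can drop the unconditional file_update_time() call from do_wp_page() and make the __do_fault() call conditional on !page_mkwrite.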
Diffstat (limited to 'mm')
-rw-r--r--   mm/memory.c | 14
1 file changed, 7 insertions, 7 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 2466d1250231..7c7fa7b4b6b6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2638,6 +2638,9 @@ reuse:
 		if (!page_mkwrite) {
 			wait_on_page_locked(dirty_page);
 			set_page_dirty_balance(dirty_page, page_mkwrite);
+			/* file_update_time outside page_lock */
+			if (vma->vm_file)
+				file_update_time(vma->vm_file);
 		}
 		put_page(dirty_page);
 		if (page_mkwrite) {
@@ -2655,10 +2658,6 @@ reuse:
 			}
 		}
 
-		/* file_update_time outside page_lock */
-		if (vma->vm_file)
-			file_update_time(vma->vm_file);
-
 		return ret;
 	}
 
@@ -3327,12 +3326,13 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	if (dirty_page) {
 		struct address_space *mapping = page->mapping;
+		int dirtied = 0;
 
 		if (set_page_dirty(dirty_page))
-			page_mkwrite = 1;
+			dirtied = 1;
 		unlock_page(dirty_page);
 		put_page(dirty_page);
-		if (page_mkwrite && mapping) {
+		if ((dirtied || page_mkwrite) && mapping) {
 			/*
 			 * Some device drivers do not set page.mapping but still
 			 * dirty their pages
@@ -3341,7 +3341,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		}
 
 		/* file_update_time outside page_lock */
-		if (vma->vm_file)
+		if (vma->vm_file && !page_mkwrite)
 			file_update_time(vma->vm_file);
 	} else {
 		unlock_page(vmf.page);