author     Darrick J. Wong <djwong@us.ibm.com>        2011-05-18 13:55:20 -0400
committer  Theodore Ts'o <tytso@mit.edu>              2011-05-18 13:55:20 -0400
commit     0e499890c1fd9e0a1bed02002161c4c7873d7489 (patch)
tree       9d45cdd242f399837fd208c39e964ef022703e54 /fs/ext4
parent     7cb1a5351da8ac499d965a78e94c79ad27891f43 (diff)
ext4: wait for writeback to complete while making pages writable
In order to stabilize pages during disk writes, ext4_page_mkwrite must
wait for writeback operations to complete before making a page
writable. Furthermore, the function must return locked pages, and
recheck the writeback status if the page lock is ever dropped. The
"someone could wander in" part of this patch was suggested by Chris
Mason.
Signed-off-by: Darrick J. Wong <djwong@us.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/ext4')
-rw-r--r--   fs/ext4/inode.c   24
1 files changed, 19 insertions, 5 deletions
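
For orientation before the diff itself: the approach in the commit message reduces to "lock the page, wait out any writeback in flight, and hand the locked page back to the fault handler", repeating the wait whenever the page lock has been dropped in between. The sketch below only illustrates that shape against the 2011-era .page_mkwrite prototype used in this file; the function name is hypothetical, and ext4's real block allocation, journalling, and i_alloc_sem bookkeeping are left out.

#include <linux/mm.h>       /* struct vm_area_struct, struct vm_fault, VM_FAULT_LOCKED */
#include <linux/pagemap.h>  /* lock_page(), wait_on_page_writeback() */

/*
 * Illustrative only: shows the stabilization pattern, not the patched
 * ext4_page_mkwrite().  The name is hypothetical.
 */
static int example_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);		/* .page_mkwrite must hand back a locked page */
	wait_on_page_writeback(page);	/* block until in-flight IO on this page finishes */

	/*
	 * If the lock has to be dropped to do real work (e.g. block
	 * allocation via write_begin/write_end), writeback may start in
	 * the meantime, so the lock + wait pair must be repeated before
	 * returning.
	 */
	return VM_FAULT_LOCKED;		/* page is locked and stable for the write fault */
}

In the actual patch this pattern is applied at every successful exit from ext4_page_mkwrite: the early "already mapped" paths and the tail after write_begin/write_end, which is why the old unlock-and-goto exits become explicit VM_FAULT_LOCKED returns in the hunks below.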
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index f6caaac83731..6348c1f610c2 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5809,15 +5809,19 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 		goto out_unlock;
 	}
 	ret = 0;
-	if (PageMappedToDisk(page))
-		goto out_unlock;
+
+	lock_page(page);
+	wait_on_page_writeback(page);
+	if (PageMappedToDisk(page)) {
+		up_read(&inode->i_alloc_sem);
+		return VM_FAULT_LOCKED;
+	}
 
 	if (page->index == size >> PAGE_CACHE_SHIFT)
 		len = size & ~PAGE_CACHE_MASK;
 	else
 		len = PAGE_CACHE_SIZE;
 
-	lock_page(page);
 	/*
 	 * return if we have all the buffers mapped. This avoid
 	 * the need to call write_begin/write_end which does a
@@ -5827,8 +5831,8 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (page_has_buffers(page)) {
 		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
 					ext4_bh_unmapped)) {
-			unlock_page(page);
-			goto out_unlock;
+			up_read(&inode->i_alloc_sem);
+			return VM_FAULT_LOCKED;
 		}
 	}
 	unlock_page(page);
@@ -5848,6 +5852,16 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (ret < 0)
 		goto out_unlock;
 	ret = 0;
+
+	/*
+	 * write_begin/end might have created a dirty page and someone
+	 * could wander in and start the IO. Make sure that hasn't
+	 * happened.
+	 */
+	lock_page(page);
+	wait_on_page_writeback(page);
+	up_read(&inode->i_alloc_sem);
+	return VM_FAULT_LOCKED;
 out_unlock:
 	if (ret)
 		ret = VM_FAULT_SIGBUS;