author     Trond Myklebust <Trond.Myklebust@netapp.com>    2007-10-18 17:08:05 -0400
committer  Trond Myklebust <Trond.Myklebust@netapp.com>    2007-10-19 17:18:57 -0400
commit     61e930a904966cc37e0a3404276f0b73037e57ca
tree       65553920bd6a471c2bae5a35ed3b4cee8bdf1258
parent     4fa4d23fa20de67df919030c1216295664866ad7
NFS: Fix a writeback race...
This patch fixes a regression that was introduced by commit
44dd151d5c21234cc534c47d7382f5c28c3143cd
We cannot zero the user page in nfs_mark_uptodate() any more, since
a) We'd be modifying the page without holding the page lock
b) We can race with other updates of the page, most notably
because of the call to nfs_wb_page() in nfs_writepage_setup().
Instead, we do the zeroing in nfs_update_request() if we see that we're
creating a request that might potentially be marked as up to date.
Thanks to Olivier Paquet for reporting the bug and providing a test-case.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
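
To illustrate the rule the patch introduces, here is a minimal, self-contained user-space C sketch of the new zero_page path: a request that starts at the beginning of a page but covers less than the full page, on a page that is not already up to date, must have its uncovered tail zeroed before the page can be marked up to date. The names below (PAGE_SIZE_MODEL, struct nfs_req_model, zero_tail_if_needed) are illustrative stand-ins for this sketch only, not kernel APIs; in the kernel the equivalent work is done with zero_user_page() inside nfs_update_request(), as shown in the diff below.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE_MODEL 4096	/* stand-in for PAGE_CACHE_SIZE */

/* Illustrative model of the two struct nfs_page fields the patch tests. */
struct nfs_req_model {
	unsigned int wb_pgbase;	/* offset of the request within the page  */
	unsigned int wb_bytes;	/* number of bytes covered by the request */
};

/*
 * Model of the zero_page path added by the patch: only a request that
 * begins at the start of the page, covers less than the whole page and
 * targets a page that is not already up to date needs its tail zeroed.
 */
static void zero_tail_if_needed(const struct nfs_req_model *req,
				unsigned char *page, bool page_uptodate)
{
	if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_SIZE_MODEL &&
	    !page_uptodate)
		memset(page + req->wb_bytes, 0,
		       PAGE_SIZE_MODEL - req->wb_bytes);
}

int main(void)
{
	unsigned char page[PAGE_SIZE_MODEL];
	struct nfs_req_model req = { .wb_pgbase = 0, .wb_bytes = 100 };

	memset(page, 0xff, sizeof(page));	/* stale data beyond the write */
	zero_tail_if_needed(&req, page, false);
	printf("byte 100 after zeroing: %u\n", page[100]);	/* prints 0 */
	return 0;
}

Doing the zeroing where the request is set up, rather than in nfs_mark_uptodate(), is what removes the race described in points a) and b) above.
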
 fs/nfs/write.c | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 0cf9d1cd9bd2..89527a487ed7 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -174,8 +174,6 @@ static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int
 		return;
 	if (count != nfs_page_length(page))
 		return;
-	if (count != PAGE_CACHE_SIZE)
-		zero_user_page(page, count, PAGE_CACHE_SIZE - count, KM_USER0);
 	SetPageUptodate(page);
 }
 
@@ -627,7 +625,8 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 				return ERR_PTR(error);
 			}
 			spin_unlock(&inode->i_lock);
-			return new;
+			req = new;
+			goto zero_page;
 		}
 		spin_unlock(&inode->i_lock);
 
@@ -655,13 +654,23 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 	if (offset < req->wb_offset) {
 		req->wb_offset = offset;
 		req->wb_pgbase = offset;
-		req->wb_bytes = rqend - req->wb_offset;
+		req->wb_bytes = max(end, rqend) - req->wb_offset;
+		goto zero_page;
 	}
 
 	if (end > rqend)
 		req->wb_bytes = end - req->wb_offset;
 
 	return req;
+zero_page:
+	/* If this page might potentially be marked as up to date,
+	 * then we need to zero any uninitalised data. */
+	if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE
+			&& !PageUptodate(req->wb_page))
+		zero_user_page(req->wb_page, req->wb_bytes,
+				PAGE_CACHE_SIZE - req->wb_bytes,
+				KM_USER0);
+	return req;
 }
 
 int nfs_flush_incompatible(struct file *file, struct page *page)