diff options
Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r-- | fs/nfs/write.c | 24 |
1 file changed, 18 insertions, 6 deletions
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 522efff3e2c5..f55c437124a2 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -665,9 +665,7 @@ zero_page: | |||
665 | * then we need to zero any uninitalised data. */ | 665 | * then we need to zero any uninitalised data. */ |
666 | if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE | 666 | if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE |
667 | && !PageUptodate(req->wb_page)) | 667 | && !PageUptodate(req->wb_page)) |
668 | zero_user_page(req->wb_page, req->wb_bytes, | 668 | zero_user_segment(req->wb_page, req->wb_bytes, PAGE_CACHE_SIZE); |
669 | PAGE_CACHE_SIZE - req->wb_bytes, | ||
670 | KM_USER0); | ||
671 | return req; | 669 | return req; |
672 | } | 670 | } |
673 | 671 | ||
@@ -699,6 +697,17 @@ int nfs_flush_incompatible(struct file *file, struct page *page) | |||
699 | } | 697 | } |
700 | 698 | ||
701 | /* | 699 | /* |
700 | * If the page cache is marked as unsafe or invalid, then we can't rely on | ||
701 | * the PageUptodate() flag. In this case, we will need to turn off | ||
702 | * write optimisations that depend on the page contents being correct. | ||
703 | */ | ||
704 | static int nfs_write_pageuptodate(struct page *page, struct inode *inode) | ||
705 | { | ||
706 | return PageUptodate(page) && | ||
707 | !(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA)); | ||
708 | } | ||
709 | |||
710 | /* | ||
702 | * Update and possibly write a cached page of an NFS file. | 711 | * Update and possibly write a cached page of an NFS file. |
703 | * | 712 | * |
704 | * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad | 713 | * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad |
@@ -719,10 +728,13 @@ int nfs_updatepage(struct file *file, struct page *page, | |||
719 | (long long)(page_offset(page) +offset)); | 728 | (long long)(page_offset(page) +offset)); |
720 | 729 | ||
721 | /* If we're not using byte range locks, and we know the page | 730 | /* If we're not using byte range locks, and we know the page |
722 | * is entirely in cache, it may be more efficient to avoid | 731 | * is up to date, it may be more efficient to extend the write |
723 | * fragmenting write requests. | 732 | * to cover the entire page in order to avoid fragmentation |
733 | * inefficiencies. | ||
724 | */ | 734 | */ |
725 | if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) { | 735 | if (nfs_write_pageuptodate(page, inode) && |
736 | inode->i_flock == NULL && | ||
737 | !(file->f_mode & O_SYNC)) { | ||
726 | count = max(count + offset, nfs_page_length(page)); | 738 | count = max(count + offset, nfs_page_length(page)); |
727 | offset = 0; | 739 | offset = 0; |
728 | } | 740 | } |