diff options
author | Trond Myklebust <Trond.Myklebust@netapp.com> | 2008-06-10 18:31:00 -0400 |
---|---|---|
committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2008-07-09 12:08:45 -0400 |
commit | efc91ed0191e3fc62bb1c556ac93fc4e661214d2 (patch) | |
tree | 291dba382da5d609c5bd35b5e369324ecbb95c00 /fs/nfs/write.c | |
parent | b390c2b55c830eb3b64633fa8d8b8837e073e458 (diff) |
NFS: Optimise append writes with holes
If a file is being extended, and we're creating a hole, we might as well
declare the entire page to be up to date.
This patch significantly improves the write performance for sparse files
in the case where lseek(SEEK_END) is used to append several non-contiguous
writes at intervals of < PAGE_SIZE.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r-- | fs/nfs/write.c | 12 |
1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index dc62bc504693..eea2d2b5278c 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -616,7 +616,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx, | |||
616 | spin_unlock(&inode->i_lock); | 616 | spin_unlock(&inode->i_lock); |
617 | radix_tree_preload_end(); | 617 | radix_tree_preload_end(); |
618 | req = new; | 618 | req = new; |
619 | goto zero_page; | 619 | goto out; |
620 | } | 620 | } |
621 | spin_unlock(&inode->i_lock); | 621 | spin_unlock(&inode->i_lock); |
622 | 622 | ||
@@ -649,19 +649,13 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx, | |||
649 | req->wb_offset = offset; | 649 | req->wb_offset = offset; |
650 | req->wb_pgbase = offset; | 650 | req->wb_pgbase = offset; |
651 | req->wb_bytes = max(end, rqend) - req->wb_offset; | 651 | req->wb_bytes = max(end, rqend) - req->wb_offset; |
652 | goto zero_page; | 652 | goto out; |
653 | } | 653 | } |
654 | 654 | ||
655 | if (end > rqend) | 655 | if (end > rqend) |
656 | req->wb_bytes = end - req->wb_offset; | 656 | req->wb_bytes = end - req->wb_offset; |
657 | 657 | ||
658 | return req; | 658 | out: |
659 | zero_page: | ||
660 | /* If this page might potentially be marked as up to date, | ||
661 | * then we need to zero any uninitalised data. */ | ||
662 | if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE | ||
663 | && !PageUptodate(req->wb_page)) | ||
664 | zero_user_segment(req->wb_page, req->wb_bytes, PAGE_CACHE_SIZE); | ||
665 | return req; | 659 | return req; |
666 | } | 660 | } |
667 | 661 | ||