author    Trond Myklebust <Trond.Myklebust@netapp.com>    2005-06-22 13:16:30 -0400
committer Trond Myklebust <Trond.Myklebust@netapp.com>    2005-06-22 16:07:38 -0400
commit    ab0a3dbedc51037f3d2e22ef67717a987b3d15e2
tree      bda40d85d59a729fb7a9c2573a43d1820df9de3c
parent    fe51beecc55d0b0dce289e4758e7c529a642f63e
[PATCH] NFS: Write optimization for short files and small O_SYNC writes.

Use stable writes if we can see that we are only going to put a single
write on the wire.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'fs/nfs/write.c')
 fs/nfs/write.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index c574d551f029..79b621a545b2 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -750,7 +750,7 @@ int nfs_updatepage(struct file *file, struct page *page,
 	 * is entirely in cache, it may be more efficient to avoid
 	 * fragmenting write requests.
 	 */
-	if (PageUptodate(page) && inode->i_flock == NULL) {
+	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
 		loff_t end_offs = i_size_read(inode) - 1;
 		unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT;
 
@@ -1342,8 +1342,16 @@ static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
 	spin_lock(&nfsi->req_lock);
 	res = nfs_scan_dirty(inode, &head, idx_start, npages);
 	spin_unlock(&nfsi->req_lock);
-	if (res)
-		error = nfs_flush_list(&head, NFS_SERVER(inode)->wpages, how);
+	if (res) {
+		struct nfs_server *server = NFS_SERVER(inode);
+
+		/* For single writes, FLUSH_STABLE is more efficient */
+		if (res == nfsi->npages && nfsi->npages <= server->wpages) {
+			if (res > 1 || nfs_list_entry(head.next)->wb_bytes <= server->wsize)
+				how |= FLUSH_STABLE;
+		}
+		error = nfs_flush_list(&head, server->wpages, how);
+	}
 	if (error < 0)
 		return error;
 	return res;
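
The heuristic added in the second hunk can be modelled outside the kernel. Below is a
minimal user-space sketch, assuming illustrative names (flush_batch, server_limits,
flush_stable) rather than the kernel's nfs_inode/nfs_server structures: a flush is marked
stable only when the batch being written is every dirty page the inode holds and the whole
batch fits inside a single WRITE RPC.

/*
 * Simplified model of the FLUSH_STABLE decision in the patch above.
 * All struct fields and the flush_stable() helper are illustrative
 * stand-ins, not the kernel's NFS data structures.
 */
#include <stdbool.h>
#include <stdio.h>

struct flush_batch {
	unsigned int res;          /* dirty requests gathered for this flush       */
	unsigned int inode_npages; /* total dirty requests currently on the inode  */
	unsigned int first_bytes;  /* size of the first (possibly only) request    */
};

struct server_limits {
	unsigned int wpages; /* max pages per WRITE RPC */
	unsigned int wsize;  /* max bytes per WRITE RPC */
};

/* Stable iff this flush covers every dirty page and one WRITE RPC will do. */
static bool flush_stable(const struct flush_batch *b, const struct server_limits *s)
{
	if (b->res != b->inode_npages || b->inode_npages > s->wpages)
		return false; /* more dirty data remains, or it spans several RPCs */
	return b->res > 1 || b->first_bytes <= s->wsize;
}

int main(void)
{
	struct server_limits srv = { .wpages = 8, .wsize = 32768 };
	struct flush_batch small = { .res = 2, .inode_npages = 2,  .first_bytes = 4096 };
	struct flush_batch large = { .res = 2, .inode_npages = 16, .first_bytes = 4096 };

	printf("small file: %s\n", flush_stable(&small, &srv) ? "FLUSH_STABLE" : "unstable");
	printf("large file: %s\n", flush_stable(&large, &srv) ? "FLUSH_STABLE" : "unstable");
	return 0;
}

The payoff of taking the stable path is that the server commits the data as part of the
WRITE reply, so the client can skip the separate COMMIT round trip that unstable writes
would otherwise require.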