diff options
 fs/nfs/write.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index c574d551f029..79b621a545b2 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -750,7 +750,7 @@ int nfs_updatepage(struct file *file, struct page *page,
 	 * is entirely in cache, it may be more efficient to avoid
 	 * fragmenting write requests.
 	 */
-	if (PageUptodate(page) && inode->i_flock == NULL) {
+	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
 		loff_t end_offs = i_size_read(inode) - 1;
 		unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT;
 
@@ -1342,8 +1342,16 @@ static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
 	spin_lock(&nfsi->req_lock);
 	res = nfs_scan_dirty(inode, &head, idx_start, npages);
 	spin_unlock(&nfsi->req_lock);
-	if (res)
-		error = nfs_flush_list(&head, NFS_SERVER(inode)->wpages, how);
+	if (res) {
+		struct nfs_server *server = NFS_SERVER(inode);
+
+		/* For single writes, FLUSH_STABLE is more efficient */
+		if (res == nfsi->npages && nfsi->npages <= server->wpages) {
+			if (res > 1 || nfs_list_entry(head.next)->wb_bytes <= server->wsize)
+				how |= FLUSH_STABLE;
+		}
+		error = nfs_flush_list(&head, server->wpages, how);
+	}
 	if (error < 0)
 		return error;
 	return res;
